def showsls(sls, values, outpath, show=False):
    """Render streamlines colored by per-point/per-streamline scalar values.

    Parameters
    ----------
    sls : sequence of ndarrays
        Streamlines to display.
    values : ndarray
        Scalar values used to color the streamlines via a lookup table.
    outpath : str or None
        If truthy, a snapshot of the scene is recorded to this path.
    show : bool, optional
        If True, open an interactive window of the scene.
    """
    # Only the names actually used below are imported; the original also
    # imported dataset fetchers and streamline utilities it never used.
    from dipy.viz import window, actor, fvtk

    renderer = window.Renderer()
    hue = [0.5, 1]  # white to purple to red
    saturation = [0.0, 1.0]  # black to white
    # Clip the upper end of the scale at the median so the lower half of the
    # value distribution spans the full hue range.
    lut_cmap = actor.colormap_lookup_table(
        scale_range=(values.min(), np.percentile(values, 50)),
        hue_range=hue,
        saturation_range=saturation)
    stream_actor5 = actor.line(sls, values, linewidth=0.1,
                               lookup_colormap=lut_cmap)
    renderer.add(stream_actor5)
    bar3 = actor.scalar_bar(lut_cmap)
    renderer.add(bar3)
    # window.show(renderer, size=(600, 600), reset_camera=False)
    if outpath:
        window.record(renderer, out_path=outpath, size=(600, 600))
    if show:
        fvtk.show(renderer)
def main():
    """CLI entry point: render a bundle colored by a per-point assignment
    map and save six snapshots, one per canonical anatomical view."""
    parser = _build_arg_parser()
    args = parser.parse_args()
    output_names = [
        'axial_superior', 'axial_inferior', 'coronal_posterior',
        'coronal_anterior', 'sagittal_left', 'sagittal_right'
    ]
    # One output file per view, named "<view>_<basename of args.output>".
    output_paths = [
        os.path.join(os.path.dirname(args.output),
                     '{}_' + os.path.basename(args.output)).format(name)
        for name in output_names
    ]
    assert_inputs_exist(parser, [args.bundle, args.map])
    assert_outputs_exists(parser, args, output_paths)

    # Scalar assignment per streamline point, stored as npz key 'arr_0'.
    assignment = np.load(args.map)['arr_0']
    lut = actor.colormap_lookup_table(scale_range=(np.min(assignment),
                                                   np.max(assignment)),
                                      hue_range=(0.1, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(1., 1.))
    tubes = actor.line(nib.streamlines.load(args.bundle).streamlines,
                       assignment,
                       lookup_colormap=lut)
    scalar_bar = actor.scalar_bar(lut)

    ren = window.Renderer()
    ren.add(tubes)
    ren.add(scalar_bar)

    # Rotate/pitch the camera between snapshots to produce the six views in
    # the same order as output_names.
    window.snapshot(ren, output_paths[0])

    ren.pitch(180)
    ren.reset_camera()
    window.snapshot(ren, output_paths[1])

    ren.pitch(90)
    ren.set_camera(view_up=(0, 0, 1))
    ren.reset_camera()
    window.snapshot(ren, output_paths[2])

    ren.pitch(180)
    ren.set_camera(view_up=(0, 0, 1))
    ren.reset_camera()
    window.snapshot(ren, output_paths[3])

    ren.yaw(90)
    ren.reset_camera()
    window.snapshot(ren, output_paths[4])

    ren.yaw(180)
    ren.reset_camera()
    window.snapshot(ren, output_paths[5])
def show_fascicles_wholebrain(s_list, vec_vols, folder_name, mask_type,
                              downsamp=1, scale=(3, 6), hue=(0.25, -0.05),
                              saturation=(0.1, 1.0)):
    """Display whole-brain fascicles as streamtubes weighted by vec_vols
    and record the scene to a PNG under the subject's streamlines folder.

    Parameters
    ----------
    s_list : sequence of ndarrays
        Streamlines to display.
    vec_vols : sequence of floats
        One weighting value per streamline, used for coloring.
    folder_name : str
        Subject folder (Windows-style path; the image is written to its
        'streamlines' subfolder).
    mask_type : str
        Suffix embedded in the output file name.
    downsamp : int, optional
        Keep every `downsamp`-th streamline to speed up rendering.
    scale, hue, saturation : sequences of 2 floats, optional
        Colormap lookup-table parameters. Defaults are immutable tuples
        (the original used mutable list defaults).
    """
    s_img = folder_name + r'\streamlines' + r'\fascicles_AxCaliber_weighted_3d_' + mask_type + '.png'

    if downsamp != 1:
        # Subsample streamlines and their weights in lockstep.
        vec_vols = vec_vols[::downsamp]
        s_list = s_list[::downsamp]

    lut_cmap = actor.colormap_lookup_table(hue_range=hue,
                                           saturation_range=saturation,
                                           scale_range=scale)
    bar = actor.scalar_bar(lut_cmap)
    w_actor = actor.streamtube(s_list, vec_vols, linewidth=0.6,
                               lookup_colormap=lut_cmap)

    r = window.Renderer()
    r.add(w_actor)
    r.add(bar)
    # Interactive window first; the recording below uses whatever camera
    # pose the user leaves the window in.
    window.show(r)
    r.set_camera(r.camera_info())
    window.record(r, out_path=s_img, size=(800, 800))
def change_streamlines_color(i_ren, obj, slider):
    """Slider callback: rebuild the colormap with a new upper hue bound.

    Follows the dipy UI callback convention: `i_ren` is the interactor /
    show-manager, `obj` the widget, `slider` the slider whose `.value`
    becomes the new upper hue bound. Mutates the module-level globals
    `hue` and `lut_cmap`, then refreshes the scalar bar and the stream
    actors (via helpers defined elsewhere in this module).
    """
    global hue
    global lut_cmap
    # refreshing the hue and lut_cmap
    hue = [0, slider.value]
    # `max_weight` and `saturation` are module-level globals set elsewhere.
    lut_cmap = actor.colormap_lookup_table(
        scale_range=(0, max_weight),
        hue_range=hue,
        saturation_range=saturation)
    # refreshing the bar
    bar.SetLookupTable(lut_cmap)
    refresh_3_stream_actors()
    refresh_showManager(i_ren, obj, slider)
    return
def visualize_streamline(darray, score, save_able=False, save_name='default.png',
                         control_par=1, hue=(0.5, 1)):
    """Visualize streamlines colored by a per-streamline score.

    Parameters
    ----------
    darray : ndarray
        Stacked streamline point data; zero rows are treated as padding
        and removed by `zero_remove` before display.
    score : array-like
        One scalar per streamline (e.g. MSE) used for coloring.
    save_able : bool, optional
        If True, record the scene to `save_name`; otherwise open an
        interactive window.
    save_name : str, optional
        Output image path used when `save_able` is True.
    control_par : float, optional
        The colormap scale is clipped at max(score) / control_par.
    hue : sequence of 2 floats, optional
        Hue range for the lookup table. Default is an immutable tuple
        (the original used a mutable list default).
    """
    data_evl = darray
    streamlines_evl = Streamlines()
    for i in range(np.shape(data_evl)[0]):
        # Strip zero-padding rows before appending each streamline.
        tmp = zero_remove(data_evl[i])
        streamlines_evl.append(tmp)

    mse_nor = score

    # Visualize the streamlines, colored by score.
    ren = window.Scene()
    saturation = [0.0, 1.0]
    lut_cmap = actor.colormap_lookup_table(
        scale_range=(min(mse_nor), max(mse_nor) / control_par),
        hue_range=hue,
        saturation_range=saturation)
    bar3 = actor.scalar_bar(lut_cmap)
    ren.add(bar3)
    stream_actor = actor.line(streamlines_evl, mse_nor, linewidth=0.1,
                              lookup_colormap=lut_cmap)
    ren.add(stream_actor)

    # Original routed this through a redundant `interactive` flag that was
    # always True when reached; the behavior is simply show-or-record.
    if not save_able:
        window.show(ren)
    if save_able:
        window.record(ren, n_frames=1, out_path=save_name, size=(800, 800))
def show_tracts(hue, saturation, scale, streamlines, mean_vol_per_tract,
                folder_name, fig_type):
    """Show streamtubes colored by per-tract mean values and record a PNG.

    An interactive window is opened first; the snapshot is taken from the
    camera pose the user leaves the window in, and written to
    <folder_name>/streamlines/mean_pasi_weighted<fig_type>.png.
    """
    from dipy.viz import window, actor

    out_png = f'{folder_name}{os.sep}streamlines{os.sep}mean_pasi_weighted{fig_type}.png'

    cmap = actor.colormap_lookup_table(hue_range=hue,
                                       saturation_range=saturation,
                                       scale_range=scale)
    tubes = actor.streamtube(streamlines, mean_vol_per_tract,
                             linewidth=0.5, lookup_colormap=cmap)
    color_bar = actor.scalar_bar(cmap)

    scene = window.Scene()
    scene.add(tubes)
    scene.add(color_bar)

    window.show(scene)
    scene.set_camera(scene.camera_info())
    window.record(scene, out_path=out_png, size=(800, 800))
def cc_part_viz_running_script(n, folder_name):
    """Load genu/body/splenium corpus-callosum streamlines for a subject
    and display them weighted by their mean values.

    Parameters
    ----------
    n : str
        Subject/file prefix (expected to begin with a path separator,
        since it is concatenated directly onto `folder_name`).
    folder_name : str
        Subject folder (Windows-style path); .trk files are read from its
        'streamlines' subfolder.
    """
    folder_name = folder_name + r'\streamlines'
    hue = [0.0, 1.0]
    saturation = [0.0, 1.0]
    scale = [3, 7]
    lut_cmap = actor.colormap_lookup_table(hue_range=hue,
                                           saturation_range=saturation,
                                           scale_range=scale)
    bar = actor.scalar_bar(lut_cmap)
    # Raw string: the original '\cc_parts' relied on the invalid escape
    # sequence '\c' (DeprecationWarning, future SyntaxError). r'\cc_parts'
    # has the identical value.
    fascicle_name = r'\cc_parts'
    g_mean, b_mean, s_mean = calc_mean_cc_vals(fascicle_name)
    g_path = folder_name + n + r'_genu_cortex_cleaned.trk'
    streamlines_g = load_ft(g_path)
    b_path = folder_name + n + r'_body_cortex_cleaned.trk'
    streamlines_b = load_ft(b_path)
    s_path = folder_name + n + r'_splenium_cortex_cleaned.trk'
    streamlines_s = load_ft(s_path)
    show_cc_parts_weighted(streamlines_g, streamlines_b, streamlines_s,
                           g_mean, b_mean, s_mean, folder_name, lut_cmap, bar)
def weighting_streamlines(
    out_folder_name,
    streamlines,
    bvec_file,
    weight_by="1.5_2_AxPasi5",
    hue=(0.0, 1.0),
    saturation=(0.0, 1.0),
    scale=(2, 7),
    fig_type="",
):
    """Color streamlines by their mean value sampled from a weighting
    volume, render them with a scalar bar, and snapshot the scene.

    Parameters
    ----------
    out_folder_name : str
        Folder where the snapshot PNG is written.
    streamlines : sequence of ndarrays
        Streamlines in world coordinates.
    bvec_file : str
        Path passed to `load_weight_by_img` to locate the weighting and
        pFr volumes.
    weight_by : str, optional
        Suffix identifying the weighting volume.
    hue, saturation, scale : sequences of 2 floats, optional
        Colormap lookup-table parameters (immutable tuple defaults; the
        original used mutable lists).
    fig_type : str, optional
        Suffix appended to the output file name.

    Returns
    -------
    r : dipy Renderer with the streamline actor and scalar bar added.
    """
    from dipy.viz import window, actor
    from dipy.tracking.streamline import values_from_volume

    weight_by_data, affine = load_weight_by_img(bvec_file, weight_by)
    stream = list(streamlines)
    vol_per_tract = values_from_volume(weight_by_data, stream, affine=affine)

    pfr_data = load_weight_by_img(bvec_file, "1.5_2_AxFr5")[0]
    pfr_per_tract = values_from_volume(pfr_data, stream, affine=affine)

    # Leave out from the calculation of mean value per tract, a chosen quantile:
    vol_vec = weight_by_data.flatten()
    q = np.quantile(vol_vec[vol_vec > 0], 0.95)
    mean_vol_per_tract = []
    for s, pfr in zip(vol_per_tract, pfr_per_tract):
        s = np.asanyarray(s)
        pfr = np.asanyarray(pfr)
        # BUG FIX: the original combined the masks as
        # `tuple([s < q] and [pfr > 0.5])`, where `and` tests the
        # truthiness of the one-element *lists* and always returns the
        # second one, silently discarding the outlier mask `s < q`.
        # Combine them elementwise instead.
        keep = (s < q) & (pfr > 0.5)
        mean_vol_per_tract.append(np.nanmean(s[keep]))

    lut_cmap = actor.colormap_lookup_table(hue_range=hue,
                                           saturation_range=saturation,
                                           scale_range=scale)
    streamlines_actor = actor.line(streamlines, mean_vol_per_tract,
                                   linewidth=1, lookup_colormap=lut_cmap)
    bar = actor.scalar_bar(lut_cmap)
    r = window.Renderer()
    r.add(streamlines_actor)
    r.add(bar)
    mean_pasi_weighted_img = f"{out_folder_name}/mean_pasi_weighted{fig_type}.png"
    # Fixed camera pose chosen for this dataset (no interactive window).
    r.set_camera(
        position=(-389.00, 225.24, 62.02),
        focal_point=(1.78, -3.27, -12.65),
        view_up=(0.00, -0.31, 0.95),
    )
    window.snapshot(r, fname=mean_pasi_weighted_img, size=(800, 800))
    return r
hue = [0.0, 1.0] saturation = [0.0, 1.0] scale = [0, 100] weight_by_file = bvec_file[:-5:] + '_' + weight_by + '.nii' weight_by_img = nib.load(weight_by_file) weight_by_data = weight_by_img.get_data() affine = weight_by_img.affine stream = list(streamlines) vol_per_tract = values_from_volume(weight_by_data, stream, affine=affine) pfr_file = bvec_file[:-5:] + '_pfrS.nii' pfr_img = nib.load(pfr_file) pfr_data = pfr_img.get_data() lut_cmap = actor.colormap_lookup_table(hue_range=hue, saturation_range=saturation, scale_range=scale) streamlines_actor = actor.line(streamlines_native, pfr_data, lookup_colormap=lut_cmap) bar = actor.scalar_bar() r = window.Renderer() r.add(streamlines_actor) r.add(bar) window.show(r) fig_path = main_folder + r"\streamlines" + s + '_genu_dist.png' r.set_camera(r.camera_info()) window.record(r, path_numbering=True, n_frames=3,
def test_slicer():
    """Exercise actor.slicer: snapshot to array and to file, extent
    cropping, RGB volumes, and slicing through a lookup-table colormap."""
    renderer = window.renderer()
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine)
    slicer.display(None, None, 25)
    window.add(renderer, slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # copy pixels in numpy array directly
    arr = window.snapshot(renderer, 'test_slicer.png')
    import scipy
    print(scipy.__version__)
    print(scipy.__file__)
    print(arr.sum())
    print(np.sum(arr == 0))
    print(np.sum(arr > 0))
    print(arr.shape)
    print(arr.dtype)

    report = window.analyze_snapshot(arr, find_objects=True)
    print(report)
    npt.assert_equal(report.objects, 1)
    # print(arr[..., 0])

    # The slicer can cut directly a smaller part of the image
    slicer.display_extent(10, 30, 10, 30, 35, 35)
    renderer.ResetCamera()
    window.add(renderer, slicer)

    # save pixels in png file not a numpy array
    with TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'slice.png')
        # window.show(renderer)
        arr = window.snapshot(renderer, fname)
        # analyze the written file rather than the returned array
        report = window.analyze_snapshot(fname, find_objects=True)
        npt.assert_equal(report.objects, 1)

    # non-3D input must be rejected
    npt.assert_raises(ValueError, actor.slicer, np.ones(10))

    renderer.clear()

    # RGB volume: last axis of size 3 is interpreted as color channels
    rgb = np.zeros((30, 30, 30, 3))
    rgb[..., 0] = 1.
    rgb_actor = actor.slicer(rgb)
    renderer.add(rgb_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report.colors_found, [True])

    lut = actor.colormap_lookup_table(scale_range=(0, 255),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(0., 1.))
    renderer.clear()
    slicer_lut = actor.slicer(data, lookup_colormap=lut)
    # display() along each of the three axes in turn
    slicer_lut.display(10, None, None)
    slicer_lut.display(None, 10, None)
    slicer_lut.display(None, None, 10)
    slicer_lut2 = slicer_lut.copy()
    slicer_lut2.display(None, None, 10)
    renderer.add(slicer_lut2)
    renderer.reset_clipping_range()
    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
def test_bundle_maps():
    """Exercise line/streamtube actors colored from a metric volume, from
    per-point values, and from explicit per-point colors."""
    renderer = window.renderer()
    bundle = fornix_streamlines()
    bundle, shift = center_streamlines(bundle)

    # Translate the bundle into the metric volume's index space
    mat = np.array([[1, 0, 0, 100],
                    [0, 1, 0, 100],
                    [0, 0, 1, 100],
                    [0, 0, 0, 1.]])
    bundle = transform_streamlines(bundle, mat)

    # metric = np.random.rand(*(200, 200, 200))
    metric = 100 * np.ones((200, 200, 200))

    # add lower values
    metric[100, :, :] = 100 * 0.5

    # create a nice orange-red colormap
    lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                      hue_range=(0., 0.1),
                                      saturation_range=(1, 1),
                                      value_range=(1., 1))

    line = actor.line(bundle, metric, linewidth=0.1, lookup_colormap=lut)
    window.add(renderer, line)
    window.add(renderer, actor.scalar_bar(lut, ' '))

    report = window.analyze_renderer(renderer)
    npt.assert_almost_equal(report.actors, 1)
    # window.show(renderer)

    renderer.clear()

    # One scalar value per streamline point
    nb_points = np.sum([len(b) for b in bundle])
    values = 100 * np.random.rand(nb_points)
    # values[:nb_points/2] = 0

    line = actor.streamtube(bundle, values, linewidth=0.1,
                            lookup_colormap=lut)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    renderer.clear()

    # Explicit RGB color per point
    colors = np.random.rand(nb_points, 3)
    # values[:nb_points/2] = 0

    line = actor.line(bundle, colors, linewidth=2)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
    # window.show(renderer)

    arr = window.snapshot(renderer)
    report2 = window.analyze_snapshot(arr)
    npt.assert_equal(report2.objects, 1)

    # try other input options for colors
    renderer.clear()
    actor.line(bundle, (1., 0.5, 0))
    actor.line(bundle, np.arange(len(bundle)))
    actor.line(bundle)
    colors = [np.random.rand(*b.shape) for b in bundle]
    actor.line(bundle, colors=colors)
an FA image and showing it in a non-standard way using an HSV colormap. """ fname_fa = os.path.join(os.path.expanduser('~'), '.dipy', 'exp_bundles_and_maps', 'bundles_2_subjects', 'subj_1', 'fa_1x1x1.nii.gz') img = nib.load(fname_fa) fa = img.get_data() """ Notice here how the scale range is (0, 255) and not (0, 1) which is the usual range of FA values. """ lut = actor.colormap_lookup_table(scale_range=(0, 255), hue_range=(0.4, 1.), saturation_range=(1, 1.), value_range=(0., 1.)) """ This is because the lookup table is applied in the slice after interpolating to (0, 255). """ fa_actor = actor.slicer(fa, affine, lookup_colormap=lut) renderer.clear() renderer.add(fa_actor) renderer.reset_camera() renderer.zoom(1.4) # window.show(renderer, size=(600, 600), reset_camera=False)
def plot_bundles_with_metric(bundle_path, endings_path, brain_mask_path,
                             bundle, metrics, output_path,
                             tracking_format="trk_legacy",
                             show_color_bar=True):
    """Render a bundle as streamtubes colored segment-wise by tractometry
    metrics, over a smoothed brain-mask contour, and record to an image.

    Parameters (assumptions marked where not verifiable from this file):
    bundle_path -- tractogram file (trk legacy or nibabel-readable)
    endings_path -- NIfTI mask of the bundle's start region
    brain_mask_path -- NIfTI brain mask used for the background contour
    bundle -- bundle name, used to pick the camera orientation
    metrics -- per-segment metric values; presumably 98 values from
        tractometry (first/last segment skipped) -- TODO confirm
    output_path -- output image path
    tracking_format -- "trk_legacy" uses trackvis.read, anything else
        nibabel streamlines loading
    show_color_bar -- add a scalar bar matching the metric range
    """
    import seaborn as sns  # import in function to avoid error if not installed (this is only needed in this function)
    from dipy.viz import actor, window
    from tractseg.libs import vtk_utils

    def _add_extra_point_to_last_streamline(sl):
        # Coloring broken as soon as all streamlines have same number of points -> why???
        # Add one number to last streamline to make it have a different number
        sl[-1] = np.append(sl[-1], [sl[-1][-1]], axis=0)
        return sl

    # Settings
    NR_SEGMENTS = 100
    ANTI_INTERPOL_MULT = 1  # increase number of points to avoid interpolation to blur the colors
    algorithm = "distance_map"  # equal_dist | distance_map | cutting_plane
    # colors = np.array(sns.color_palette("coolwarm", NR_SEGMENTS))  # colormap blue to red (does not fit to colorbar)
    colors = np.array(sns.light_palette(
        "red", NR_SEGMENTS))  # colormap only red, which fits to color_bar
    img_size = (1000, 1000)

    # Tractometry skips first and last element. Therefore we only have 98 instead of 100 elements.
    # Here we duplicate the first and last element to get back to 100 elements
    metrics = list(metrics)
    metrics = np.array([metrics[0]] + metrics + [metrics[-1]])

    metrics_max = metrics.max()
    metrics_min = metrics.min()
    if metrics_max == metrics_min:
        # Constant metric: avoid divide-by-zero in scaling; color index 0.
        metrics = np.zeros(len(metrics))
    else:
        metrics = img_utils.scale_to_range(
            metrics,
            range=(0, 99))  # range needs to be same as segments in colormap

    orientation = dataset_specific_utils.get_optimal_orientation_for_bundle(
        bundle)

    # Load mask
    beginnings_img = nib.load(endings_path)
    beginnings = beginnings_img.get_data()
    for i in range(1):
        # Dilate once so streamline start points just outside the mask
        # still land on a nonzero voxel.
        beginnings = binary_dilation(beginnings)

    # Load trackings
    if tracking_format == "trk_legacy":
        streams, hdr = trackvis.read(bundle_path)
        streamlines = [s[0] for s in streams]
    else:
        sl_file = nib.streamlines.load(bundle_path)
        streamlines = sl_file.streamlines

    # Reduce streamline count
    streamlines = streamlines[::2]

    # Reorder to make all streamlines have same start region
    streamlines = fiber_utils.add_to_each_streamline(streamlines, 0.5)
    streamlines_new = []
    for idx, sl in enumerate(streamlines):
        startpoint = sl[0]
        # Flip streamline if not in right order
        if beginnings[int(startpoint[0]), int(startpoint[1]),
                      int(startpoint[2])] == 0:
            sl = sl[::-1, :]
        streamlines_new.append(sl)
    streamlines = fiber_utils.add_to_each_streamline(streamlines_new, -0.5)

    if algorithm == "distance_map" or algorithm == "equal_dist":
        streamlines = fiber_utils.resample_fibers(
            streamlines, NR_SEGMENTS * ANTI_INTERPOL_MULT)
    elif algorithm == "cutting_plane":
        streamlines = fiber_utils.resample_to_same_distance(
            streamlines, max_nr_points=NR_SEGMENTS,
            ANTI_INTERPOL_MULT=ANTI_INTERPOL_MULT)

    # Cut start and end by percentage
    # streamlines = FiberUtils.resample_fibers(streamlines, NR_SEGMENTS * ANTI_INTERPOL_MULT)
    # remove = int((NR_SEGMENTS * ANTI_INTERPOL_MULT) * 0.15)  # remove X% in beginning and end
    # streamlines = np.array(streamlines)[:, remove:-remove, :]
    # streamlines = list(streamlines)

    # Assign a segment index to every point of every streamline.
    if algorithm == "equal_dist":
        segment_idxs = []
        for i in range(len(streamlines)):
            segment_idxs.append(list(range(NR_SEGMENTS * ANTI_INTERPOL_MULT)))
        segment_idxs = np.array(segment_idxs)
    elif algorithm == "distance_map":
        # Nearest centroid point of a single QuickBundles cluster decides
        # each point's segment.
        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=100., metric=metric)
        clusters = qb.cluster(streamlines)
        centroids = Streamlines(clusters.centroids)
        _, segment_idxs = cKDTree(centroids.data, 1,
                                  copy_data=True).query(streamlines, k=1)
    elif algorithm == "cutting_plane":
        streamlines_resamp = fiber_utils.resample_fibers(
            streamlines, NR_SEGMENTS * ANTI_INTERPOL_MULT)
        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=100., metric=metric)
        clusters = qb.cluster(streamlines_resamp)
        centroid = Streamlines(clusters.centroids)[0]
        # index of the middle cluster
        middle_idx = int(NR_SEGMENTS / 2) * ANTI_INTERPOL_MULT
        middle_point = centroid[middle_idx]
        segment_idxs = fiber_utils.get_idxs_of_closest_points(
            streamlines, middle_point)
        # Align along the middle and assign indices
        segment_idxs_eqlen = []
        for idx, sl in enumerate(streamlines):
            sl_middle_pos = segment_idxs[idx]
            before_elems = sl_middle_pos
            after_elems = len(sl) - sl_middle_pos
            base_idx = 1000  # use higher index to avoid negative numbers for area below middle
            r = range((base_idx - before_elems), (base_idx + after_elems))
            segment_idxs_eqlen.append(r)
        segment_idxs = segment_idxs_eqlen

    # Add extra point otherwise coloring BUG
    streamlines = _add_extra_point_to_last_streamline(streamlines)

    renderer = window.Renderer()
    colors_all = []  # final shape will be [nr_streamlines, nr_points, 3]
    for jdx, sl in enumerate(streamlines):
        colors_sl = []
        for idx, p in enumerate(sl):
            if idx >= len(segment_idxs[jdx]):
                # the extra point added above reuses the previous segment
                seg_idx = segment_idxs[jdx][idx - 1]
            else:
                seg_idx = segment_idxs[jdx][idx]

            m = metrics[int(seg_idx / ANTI_INTERPOL_MULT)]
            color = colors[int(m)]
            colors_sl.append(color)
        colors_all.append(
            colors_sl
        )  # this can not be converted to numpy array because last element has one more elem

    sl_actor = actor.streamtube(streamlines, colors=colors_all,
                                linewidth=0.2, opacity=1)
    renderer.add(sl_actor)

    # plot brain mask
    mask = nib.load(brain_mask_path).get_data()
    cont_actor = vtk_utils.contour_from_roi_smooth(
        mask, affine=beginnings_img.affine, color=[.9, .9, .9],
        opacity=.2, smoothing=50)
    renderer.add(cont_actor)

    if show_color_bar:
        lut_cmap = actor.colormap_lookup_table(
            scale_range=(metrics_min, metrics_max),
            hue_range=(0.0, 0.0),
            saturation_range=(0.0, 1.0))
        renderer.add(actor.scalar_bar(lut_cmap))

    # Fixed camera poses tuned per orientation.
    if orientation == "sagittal":
        renderer.set_camera(position=(-412.95, -34.38, 80.15),
                            focal_point=(102.46, -16.96, -11.71),
                            view_up=(0.1806, 0.0, 0.9835))
    elif orientation == "coronal":
        renderer.set_camera(position=(-48.63, 360.31, 98.37),
                            focal_point=(-20.16, 92.89, 36.02),
                            view_up=(-0.0047, -0.2275, 0.9737))
    elif orientation == "axial":
        pass
    else:
        raise ValueError("Invalid orientation provided")

    # Use this to interatively get new camera angle
    # window.show(renderer, size=img_size, reset_camera=False)
    # print(renderer.get_camera())

    window.record(renderer, out_path=output_path, size=img_size)
def change_TMS_effects(x, y, z):
    """ Computes the TMS effects for a given coil position (x,y,z)
    according to the existing theoretical models
    see Silva et al. (2008) Elucidating the mechanisms and loci of neuronal
    excitation by transcranial magnetic stimulation using a finite element
    model of a cortical sulcus
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2693370/

    Relies on module-level globals: out_dir, mesh_path, subject_name,
    new_streams_T1_array, streams_array_derivative, colors, bundle_native,
    and on helpers simulation, load_elems, get_field, deriv_e_field,
    calculate_new_colors defined elsewhere.

    Parameters
    ----------
    x,y,z : float
        Coordinates of a stimulation coil.

    Returns
    -------
    my_lut : lookup_table vtkLookupTable
        Lookup table for the colormap to be used when visualizing TMS
        effects over the streamlines
    my_colors : numpy array
        Contains colors encoding the TMS effects at each point of each of
        the streamlines

    Side effects
    ------------
    Runs a SimNIBS simulation into a per-position output folder and pickles
    the per-point effective-field array to
    <out_dir><x>_<y>_<z>/<subject_name>_effective_field.txt.
    """
    l1 = 2  # membrane space constant 2mm
    l2 = l1**2
    print(x, y, z)
    # Running extrema of the combined effect, updated over all points below.
    effect_max = 0.100
    effect_min = -0.100
    position = [x - 256 / 2, y - 256 / 2, z - 256 / 2
                ]  # -256/2 because of a freesurfer RAS coordinate system
    current_out_dir = out_dir + str(x) + '_' + str(y) + '_' + str(z)
    simulation(mesh_path, current_out_dir, pos_centre=position)
    mesh_file = current_out_dir + '/' + subject_name + \
        '_TMS_1-0001_Magstim_70mm_Fig8_nii_scalar.msh'
    field_mesh = simnibs.msh.read_msh(mesh_file)
    # Electric field sampled at mesh nodes
    field_as_nodedata = field_mesh.elmdata[0].as_nodedata()
    field_at_nodes = field_as_nodedata.value
    ttt = load_elems(field_mesh.nodes.node_coord,
                     field_mesh.elm.node_number_list)
    effective_field = copy.deepcopy(new_streams_T1_array)
    for stream in range(len(new_streams_T1_array)):
        my_steam = copy.deepcopy(new_streams_T1_array[stream])
        print('starting _' + str(stream) + ' out of ' +
              str(len(new_streams_T1_array)))
        for t in range(len(my_steam[:, 0])):
            # -256/2 because of a freesurfer RAS coordinate system
            x = my_steam[t, 0] - 256 / 2
            y = my_steam[t, 1] - 256 / 2
            z = my_steam[t, 2] - 256 / 2
            xyz = np.asarray([x, y, z])
            field_vector_xyz = get_field(ttt, xyz, field_at_nodes)
            # Column 0: first-order term, field projected on the fiber tangent.
            effective_field[stream][t, 0] = l1 * np.dot(
                field_vector_xyz, streams_array_derivative[stream][t, :])
            # Column 1: second-order term from the field's spatial derivative.
            effective_field[stream][t, 1] = l2 * deriv_e_field(
                xyz, field_at_nodes, streams_array_derivative[stream][t, :],
                ttt)
            # Column 2: combined effect (sum of both terms).
            effective_field[stream][t, 2] = effective_field[stream][
                t, 0] + effective_field[stream][t, 1]
            if (effective_field[stream][t, 2] < effect_min):
                effect_min = effective_field[stream][t, 2]
            if effective_field[stream][t, 2] > effect_max:
                effect_max = effective_field[stream][t, 2]
    with open(current_out_dir + '/' + subject_name + '_effective_field.txt',
              'wb') as f:
        pickle.dump(effective_field, f)
    f.close()
    my_lut = actor.colormap_lookup_table(scale_range=(effect_min, effect_max),
                                         hue_range=(0.4, 1.),
                                         saturation_range=(1, 1.))
    my_colors = calculate_new_colors(colors, bundle_native, effective_field,
                                     effect_min, effect_max)
    return my_lut, my_colors
def fiber_simple_3d_show_advanced(img, streamlines, colors=None, linewidth=1, s='png', imgcolor=False): streamlines = streamlines data = img.get_data() shape = img.shape affine = img.affine """ With our current design it is easy to decide in which space you want the streamlines and slices to appear. The default we have here is to appear in world coordinates (RAS 1mm). """ world_coords = True """ If we want to see the objects in native space we need to make sure that all objects which are currently in world coordinates are transformed back to native space using the inverse of the affine. """ if not world_coords: from dipy.tracking.streamline import transform_streamlines streamlines = transform_streamlines(streamlines, np.linalg.inv(affine)) """ Now we create, a ``Renderer`` object and add the streamlines using the ``line`` function and an image plane using the ``slice`` function. """ ren = window.Renderer() stream_actor = actor.line(streamlines, colors=colors, linewidth=linewidth) """img colormap""" if imgcolor: lut = actor.colormap_lookup_table(scale_range=(0, 1), hue_range=(0, 1.), saturation_range=(0., 1.), value_range=(0., 1.)) else: lut = None if not world_coords: image_actor_z = actor.slicer(data, affine=np.eye(4), lookup_colormap=lut) else: image_actor_z = actor.slicer(data, affine, lookup_colormap=lut) """ We can also change also the opacity of the slicer. """ slicer_opacity = 0.6 image_actor_z.opacity(slicer_opacity) """ We can add additonal slicers by copying the original and adjusting the ``display_extent``. """ image_actor_x = image_actor_z.copy() image_actor_x.opacity(slicer_opacity) x_midpoint = int(np.round(shape[0] / 2)) image_actor_x.display_extent(x_midpoint, x_midpoint, 0, shape[1] - 1, 0, shape[2] - 1) image_actor_y = image_actor_z.copy() image_actor_y.opacity(slicer_opacity) y_midpoint = int(np.round(shape[1] / 2)) image_actor_y.display_extent(0, shape[0] - 1, y_midpoint, y_midpoint, 0, shape[2] - 1) """ Connect the actors with the Renderer. 
""" ren.add(stream_actor) ren.add(image_actor_z) ren.add(image_actor_x) ren.add(image_actor_y) """ Now we would like to change the position of each ``image_actor`` using a slider. The sliders are widgets which require access to different areas of the visualization pipeline and therefore we don't recommend using them with ``show``. The more appropriate way is to use them with the ``ShowManager`` object which allows accessing the pipeline in different areas. Here is how: """ show_m = window.ShowManager(ren, size=(1200, 900)) show_m.initialize() """ After we have initialized the ``ShowManager`` we can go ahead and create sliders to move the slices and change their opacity. """ line_slider_z = ui.LineSlider2D(min_value=0, max_value=shape[2] - 1, initial_value=shape[2] / 2, text_template="{value:.0f}", length=140) line_slider_x = ui.LineSlider2D(min_value=0, max_value=shape[0] - 1, initial_value=shape[0] / 2, text_template="{value:.0f}", length=140) line_slider_y = ui.LineSlider2D(min_value=0, max_value=shape[1] - 1, initial_value=shape[1] / 2, text_template="{value:.0f}", length=140) opacity_slider = ui.LineSlider2D(min_value=0.0, max_value=1.0, initial_value=slicer_opacity, length=140) """ Now we will write callbacks for the sliders and register them. 
""" def change_slice_z(i_ren, obj, slider): z = int(np.round(slider.value)) image_actor_z.display_extent(0, shape[0] - 1, 0, shape[1] - 1, z, z) def change_slice_x(i_ren, obj, slider): x = int(np.round(slider.value)) image_actor_x.display_extent(x, x, 0, shape[1] - 1, 0, shape[2] - 1) def change_slice_y(i_ren, obj, slider): y = int(np.round(slider.value)) image_actor_y.display_extent(0, shape[0] - 1, y, y, 0, shape[2] - 1) def change_opacity(i_ren, obj, slider): slicer_opacity = slider.value image_actor_z.opacity(slicer_opacity) image_actor_x.opacity(slicer_opacity) image_actor_y.opacity(slicer_opacity) line_slider_z.add_callback(line_slider_z.slider_disk, "MouseMoveEvent", change_slice_z) line_slider_x.add_callback(line_slider_x.slider_disk, "MouseMoveEvent", change_slice_x) line_slider_y.add_callback(line_slider_y.slider_disk, "MouseMoveEvent", change_slice_y) opacity_slider.add_callback(opacity_slider.slider_disk, "MouseMoveEvent", change_opacity) """ We'll also create text labels to identify the sliders. """ def build_label(text): label = ui.TextBlock2D() label.message = text label.font_size = 18 label.font_family = 'Arial' label.justification = 'left' label.bold = False label.italic = False label.shadow = False # label.actor.GetTextProperty().SetBackgroundColor(0, 0, 0) # label.actor.GetTextProperty().SetBackgroundOpacity(0.0) label.color = (1, 1, 1) return label line_slider_label_z = build_label(text="Z Slice") line_slider_label_x = build_label(text="X Slice") line_slider_label_y = build_label(text="Y Slice") opacity_slider_label = build_label(text="Opacity") """ Now we will create a ``panel`` to contain the sliders and labels. 
""" panel = ui.Panel2D(center=(1030, 120), size=(300, 200), color=(1, 1, 1), opacity=0.1, align="right") panel.add_element(line_slider_label_x, 'relative', (0.1, 0.75)) panel.add_element(line_slider_x, 'relative', (0.65, 0.8)) panel.add_element(line_slider_label_y, 'relative', (0.1, 0.55)) panel.add_element(line_slider_y, 'relative', (0.65, 0.6)) panel.add_element(line_slider_label_z, 'relative', (0.1, 0.35)) panel.add_element(line_slider_z, 'relative', (0.65, 0.4)) panel.add_element(opacity_slider_label, 'relative', (0.1, 0.15)) panel.add_element(opacity_slider, 'relative', (0.65, 0.2)) show_m.ren.add(panel) """ Then, we can render all the widgets and everything else in the screen and start the interaction using ``show_m.start()``. However, if you change the window size, the panel will not update its position properly. The solution to this issue is to update the position of the panel using its ``re_align`` method every time the window size changes. """ global size size = ren.GetSize() def win_callback(obj, event): global size if size != obj.GetSize(): size_old = size size = obj.GetSize() size_change = [size[0] - size_old[0], 0] panel.re_align(size_change) show_m.initialize() """ Finally, please set the following variable to ``True`` to interact with the datasets in 3D. """ interactive = True #False ren.zoom(1.5) ren.reset_clipping_range() if interactive: show_m.add_window_callback(win_callback) show_m.render() show_m.start() else: window.record( ren, out_path= '/home/brain/workingdir/data/dwi/hcp/preprocessed/response_dhollander/' '100408/result/result20vs45/cc_clustering_png1/100408lr15_%s.png' % s, size=(1200, 900), reset_camera=False) """ .. figure:: bundles_and_3_slices.png :align: center A few bundles with interactive slicing. """ del show_m """
worksheet.write(bun_num + 1, l, np.shape(bundle)[0]) l += 1 for ref in references: worksheet.write(bun_num + 1, l + 0, np.mean(bundles_fa[bun_num])) worksheet.write(bun_num + 1, l + 1, np.min(bundles_fa[bun_num])) worksheet.write(bun_num + 1, l + 2, np.max(bundles_fa[bun_num])) worksheet.write(bun_num + 1, l + 3, np.std(bundles_fa[bun_num])) l = l + 4 bun_num += 1 workbook.close() lut_cmap = actor.colormap_lookup_table(scale_range=(0.1, 0.25)) record_path = os.path.join( figures_path, group_str + '_MDT' + ratio_str + '_' + index_to_struct[target_tuple[0]] + '_to_' + index_to_struct[target_tuple[1]] + '_bundles_figure.png') #scene = None #interactive = False #record_path = None scene = setup_view(selected_bundles, colors=lut_cmap, ref=anat_path, world_coords=True, objectvals=bundles_fa, colorbar=True, record=record_path,
:align: center **Every point with a color from FA**. Show every point with a value from a volume with your colormap ============================================================== Here we will need to input the ``fa`` map in ``streamtube`` or `` """ renderer.clear() hue = [0.0, 0.0] # red only saturation = [0.0, 1.0] # white to red lut_cmap = actor.colormap_lookup_table(hue_range=hue, saturation_range=saturation) stream_actor3 = actor.line(bundle_native, fa, linewidth=0.1, lookup_colormap=lut_cmap) bar2 = actor.scalar_bar(lut_cmap) renderer.add(stream_actor3) renderer.add(bar2) # window.show(renderer, size=(600, 600), reset_camera=False) window.record(renderer, out_path="bundle3.png", size=(600, 600)) """ .. figure:: bundle3.png :align: center **Every point with a color from FA using a non default colormap**.
trk_file_path = os.path.join(trk_folder, trk_name) if os.path.exists(trk_file_path): try: streamlines_data = load_trk(trk_file_path, 'same') except: streamlines_data = load_trk_spe(trk_file_path, 'same') else: raise Exception('cannot find file') streamlines = streamlines_data.streamlines hue = (0.5, 0.5) # blue only, probably should change soonish saturation = (0.0, 1.0) # black to white lut_cmap = actor.colormap_lookup_table(scale_range=(0, 1), hue_range=hue, saturation_range=saturation) # lut_cmap = actor.colormap_lookup_table( # scale_range=(0.01, 0.55)) lut_cmap = actor.colormap_lookup_table(scale_range=(0.1, 0.25)) record_path = os.path.join(figures_path, trk_name[0:6] + '_figure.png') scene = None interactive = True # record_path = None scene = setup_view(streamlines[:], colors=lut_cmap, ref=anat_path, world_coords=True,
""" Now we calculate the Cluster Confidence Index using the corpus callosum streamline bundle and visualize them. """ cci = cluster_confidence(long_streamlines) # Visualize the streamlines, colored by cci ren = window.Renderer() hue = [0.5, 1] saturation = [0.0, 1.0] lut_cmap = actor.colormap_lookup_table(scale_range=(cci.min(), cci.max()/4), hue_range=hue, saturation_range=saturation) bar3 = actor.scalar_bar(lut_cmap) ren.add(bar3) stream_actor = actor.line(long_streamlines, cci, linewidth=0.1, lookup_colormap=lut_cmap) ren.add(stream_actor) """ If you set interactive to True (below), the rendering will pop up in an interactive window. """
def test_slicer():
    """Exercise ``actor.slicer`` with scalar, RGB and anisotropic volumes.

    Renders offscreen snapshots and checks with ``window.analyze_snapshot``
    that exactly one object is visible in each configuration, and that the
    slicer reports the expected grid shape.
    """
    # Use the Renderer class, consistent with the rest of the file
    # (was the deprecated lowercase alias ``window.renderer()``).
    renderer = window.Renderer()
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine)
    slicer.display(None, None, 25)
    renderer.add(slicer)

    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # copy pixels in numpy array directly
    arr = window.snapshot(renderer, 'test_slicer.png', offscreen=True)

    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)

    # The slicer can cut directly a smaller part of the image
    slicer.display_extent(10, 30, 10, 30, 35, 35)
    # snake_case API, consistent with the calls above
    # (was the raw VTK ``renderer.ResetCamera()``).
    renderer.reset_camera()

    renderer.add(slicer)

    # save pixels in png file not a numpy array
    with TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'slice.png')
        # window.show(renderer)
        window.snapshot(renderer, fname, offscreen=True)
        report = window.analyze_snapshot(fname, find_objects=True)
        npt.assert_equal(report.objects, 1)

    # non-3D input must be rejected
    npt.assert_raises(ValueError, actor.slicer, np.ones(10))

    renderer.clear()

    # RGB volume: last axis of size 3 is interpreted as color
    rgb = np.zeros((30, 30, 30, 3))
    rgb[..., 0] = 1.
    rgb_actor = actor.slicer(rgb)

    renderer.add(rgb_actor)

    renderer.reset_camera()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report.colors_found, [True])

    lut = actor.colormap_lookup_table(scale_range=(0, 255),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(0., 1.))
    renderer.clear()
    slicer_lut = actor.slicer(data, lookup_colormap=lut)

    slicer_lut.display(10, None, None)
    slicer_lut.display(None, 10, None)
    slicer_lut.display(None, None, 10)

    # opacity and picking tolerance must survive a copy()
    slicer_lut.opacity(0.5)
    slicer_lut.tolerance(0.03)
    slicer_lut2 = slicer_lut.copy()
    npt.assert_equal(slicer_lut2.GetOpacity(), 0.5)
    npt.assert_equal(slicer_lut2.picker.GetTolerance(), 0.03)
    slicer_lut2.opacity(1)
    slicer_lut2.tolerance(0.025)
    slicer_lut2.display(None, None, 10)
    renderer.add(slicer_lut2)

    renderer.reset_clipping_range()

    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)

    renderer.clear()

    # anisotropic voxel size, nearest-neighbour interpolation
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])
    slicer = actor.slicer(data, affine, interpolation='nearest')
    slicer.display(None, None, 25)

    renderer.add(slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(data.shape, slicer.shape)

    renderer.clear()

    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])

    from dipy.align.reslice import reslice
    data2, affine2 = reslice(data, affine, zooms=(1, 3, 2),
                             new_zooms=(1, 1, 1))

    slicer = actor.slicer(data2, affine2, interpolation='linear')
    slicer.display(None, None, 25)

    renderer.add(slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    # window.show(renderer, reset_camera=False)
    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    # reslicing to isotropic zooms scales the grid by the original zooms
    npt.assert_array_equal([1, 3, 2] * np.array(data.shape),
                           np.array(slicer.shape))
def main():
    """Open the interactive COMMIT streamline-weight viewer.

    Loads the COMMIT optimisation output (Stick or Cylinder model) found
    under ``args.commitOutputPath``, computes per-streamline weights for
    every stored iteration to fix the colormap range, then builds a
    DIPY/FURY window with sliders to browse iterations, recolor the
    streamlines and subdue weights outside a chosen interval.

    Relies on module-level state (``args`` plus the slider callbacks such
    as ``change_iteration`` and ``remove_small_weight``), hence the long
    list of ``global`` declarations below.
    """
    global parser, args, model, bar, lut_cmap, list_x_file, max_weight
    global saturation, renderer, norm_fib, norm1, norm2, norm3
    global big_stream_actor, good_stream_actor, weak_stream_actor
    global big_Weight, good_Weight, weak_Weight
    global smallBundle_safe, smallWeight_safe, show_m
    global big_bundle, good_bundle, weak_bundle
    global nF, nIC, Ra
    global change_colormap_slider, remove_small_weights_slider
    global opacity_slider, remove_big_weights_slider, change_iteration_slider
    global num_computed_streamlines, numbers_of_streamlines_in_interval

    # --- determine which model output is available (Stick or Cylinder) ---
    model = None
    stick_dir = args.commitOutputPath + "/Results_StickZeppelinBall"
    cylinder_dir = args.commitOutputPath + "/Results_CylinderZeppelinBall"
    if os.path.isdir(stick_dir) and os.path.isdir(cylinder_dir):
        model_index = input("Which model do you want to load "
                            "(1 for 'Cylinder', 2 for 'Stick') : ")
        # input() returns a str in Python 3: the original ``model_index == 1``
        # comparison was always False, silently forcing the Stick model.
        if str(model_index).strip() == "1":
            model = "Cylinder"
        else:
            model = "Stick"
    elif os.path.isdir(stick_dir):
        model = "Stick"
    elif os.path.isdir(cylinder_dir):
        model = "Cylinder"
    else:
        print("No valid model in this path")
        sys.exit(0)

    # shared path prefixes, hoisted out of the repeated expressions below
    results_dir = args.commitOutputPath + "/Results_" + model + "ZeppelinBall/"
    coeff_dir = args.commitOutputPath + "/Coeff_x_" + model + "ZeppelinBall/"

    # --- collect and sort the per-iteration coefficient files ---
    list_x_file = [file for file in os.listdir(coeff_dir)
                   if file.endswith('.npy') and file[:-4].isdigit()]
    normalize_file_name(list_x_file)
    list_x_file.sort()
    num_iteration = len(list_x_file)

    # number of streamlines we want to load
    num_computed_streamlines = int(args.streamlinesNumber)

    # --- compute the weight interval (max over all iterations) ---
    max_weight = 0
    if model == "Cylinder":
        with open(results_dir + "results.pickle", 'rb') as pickle_file:
            object_file = pickle.load(pickle_file)
        Ra = np.linspace(0.75, 3.5, 12) * 1E-6
        nIC = len(Ra)  # IC atoms
        nEC = 4        # EC atoms
        nISO = 1       # ISO atoms
        nF = object_file[0]['optimization']['regularisation']['sizeIC']
        nE = object_file[0]['optimization']['regularisation']['sizeEC']
        nV = object_file[0]['optimization']['regularisation']['sizeISO']
        num_ADI = np.zeros(nF)
        den_ADI = np.zeros(nF)
        dim = nib.load(results_dir + "compartment_IC.nii.gz").get_data().shape
        norm_fib = np.load(coeff_dir + "norm_fib.npy")
        norm1 = np.load(coeff_dir + "norm1.npy")
        norm2 = np.load(coeff_dir + "norm2.npy")
        norm3 = np.load(coeff_dir + "norm3.npy")
        for itNbr in list_x_file:
            # computing diameter (ADI) from the IC compartment coefficients
            x = np.load(coeff_dir + itNbr + '.npy')
            x_norm = x / np.hstack((norm1 * norm_fib, norm2, norm3))
            # NOTE(review): num_ADI/den_ADI accumulate across iterations of
            # the outer itNbr loop (they are never reset inside it) — confirm
            # this running average is intended and not a leftover bug.
            for i in range(nIC):
                den_ADI = den_ADI + x_norm[i * nF:(i + 1) * nF]
                num_ADI = num_ADI + x_norm[i * nF:(i + 1) * nF] * Ra[i]
            Weight = 2 * (num_ADI / (den_ADI + np.spacing(1))) * 1E6
            smallWeight_safe = Weight[:num_computed_streamlines]
            itNbr_max = np.amax(smallWeight_safe)
            if itNbr_max > max_weight:
                max_weight = itNbr_max
    else:  # model == "Stick"
        with open(results_dir + "results.pickle", 'rb') as pickle_file:
            object_file = pickle.load(pickle_file)
        norm_fib = np.load(coeff_dir + "norm_fib.npy")
        norm1 = np.load(coeff_dir + "norm1.npy")
        norm2 = np.load(coeff_dir + "norm2.npy")
        norm3 = np.load(coeff_dir + "norm3.npy")
        nF = object_file[0]['optimization']['regularisation']['sizeIC']
        for itNbr in list_x_file:
            x = np.load(coeff_dir + itNbr + '.npy')
            x_norm = x / np.hstack((norm1 * norm_fib, norm2, norm3))
            Weight = x_norm[:nF]  # signal fractions
            smallWeight_safe = Weight[:num_computed_streamlines]
            itNbr_max = np.amax(smallWeight_safe)
            if itNbr_max > max_weight:
                max_weight = itNbr_max

    # we need an interval slightly bigger than the max_weight
    max_weight = max_weight + 0.00001

    # --- compute the weights of the first iteration (initial display) ---
    if model == "Cylinder":
        with open(results_dir + "results.pickle", 'rb') as pickle_file:
            object_file = pickle.load(pickle_file)
        Ra = np.linspace(0.75, 3.5, 12) * 1E-6
        nIC = len(Ra)  # IC atoms
        nEC = 4        # EC atoms
        nISO = 1       # ISO atoms
        nF = object_file[0]['optimization']['regularisation']['sizeIC']
        nE = object_file[0]['optimization']['regularisation']['sizeEC']
        nV = object_file[0]['optimization']['regularisation']['sizeISO']
        dim = nib.load(results_dir + "compartment_IC.nii.gz").get_data().shape
        norm_fib = np.load(coeff_dir + "norm_fib.npy")
        # add the normalisation
        x = np.load(coeff_dir + list_x_file[0] + '.npy')
        norm1 = np.load(coeff_dir + "norm1.npy")
        norm2 = np.load(coeff_dir + "norm2.npy")
        norm3 = np.load(coeff_dir + "norm3.npy")
        x_norm = x / np.hstack((norm1 * norm_fib, norm2, norm3))
        num_ADI = np.zeros(nF)
        den_ADI = np.zeros(nF)
        for i in range(nIC):
            den_ADI = den_ADI + x_norm[i * nF:(i + 1) * nF]
            num_ADI = num_ADI + x_norm[i * nF:(i + 1) * nF] * Ra[i]
        Weight = 2 * (num_ADI / (den_ADI + np.spacing(1))) * 1E6
        smallWeight_safe = Weight[:num_computed_streamlines]
        weak_Weight = smallWeight_safe[:1]
        big_Weight = smallWeight_safe[:1]
        good_Weight = copy.copy(smallWeight_safe)
    else:  # model == "Stick"
        with open(results_dir + "results.pickle", 'rb') as pickle_file:
            object_file = pickle.load(pickle_file)
        nF = object_file[0]['optimization']['regularisation']['sizeIC']
        x = np.load(coeff_dir + list_x_file[0] + '.npy')
        norm1 = np.load(coeff_dir + "norm1.npy")
        norm2 = np.load(coeff_dir + "norm2.npy")
        norm3 = np.load(coeff_dir + "norm3.npy")
        # norm_fib was already loaded in the max-weight pass above
        x_norm = x / np.hstack((norm1 * norm_fib, norm2, norm3))
        Weight = x_norm[:nF]  # signal fractions
        smallWeight_safe = Weight[:num_computed_streamlines]
        weak_Weight = smallWeight_safe[:1]
        big_Weight = smallWeight_safe[:1]
        good_Weight = copy.copy(smallWeight_safe)

    # --- load streamlines from the dictionary_TRK_fibers.trk file ---
    streams, hdr = nib.trackvis.read(
        args.commitOutputPath + "/dictionary_TRK_fibers.trk")
    streamlines = [s[0] for s in streams]
    smallBundle_safe = streamlines[:num_computed_streamlines]
    weak_bundle = smallBundle_safe[:1]
    big_bundle = smallBundle_safe[:1]
    good_bundle = copy.copy(smallBundle_safe)

    # number of good streamlines
    num_streamlines = len(smallBundle_safe)

    # --- map streamlines and initial weights (red colormap + scalar bar) ---
    hue = [0, 0]             # red only
    saturation = [0.0, 1.0]  # black to white
    lut_cmap = actor.colormap_lookup_table(
        scale_range=(0, max_weight),
        hue_range=hue,
        saturation_range=saturation)
    weak_stream_actor = actor.line(weak_bundle, weak_Weight,
                                   lookup_colormap=lut_cmap)
    big_stream_actor = actor.line(big_bundle, big_Weight,
                                  lookup_colormap=lut_cmap)
    good_stream_actor = actor.line(good_bundle, good_Weight,
                                   lookup_colormap=lut_cmap)

    bar = actor.scalar_bar(lut_cmap, title='weight')
    bar.SetHeight(0.5)
    bar.SetWidth(0.1)
    bar.SetPosition(0.85, 0.45)

    renderer = window.Renderer()
    renderer.set_camera(position=(-176.42, 118.52, 128.20),
                        focal_point=(113.30, 100, 76.56),
                        view_up=(0.18, 0.00, 0.98))
    renderer.add(big_stream_actor)
    renderer.add(good_stream_actor)
    renderer.add(weak_stream_actor)
    renderer.add(bar)

    # --- sliders/buttons wired to the module-level callbacks ---
    show_m = window.ShowManager(renderer, size=(1200, 900))
    show_m.initialize()

    save_one_image_bouton = ui.LineSlider2D(min_value=0, max_value=1,
                                            initial_value=0,
                                            text_template="save", length=1)
    add_graph_bouton = ui.LineSlider2D(min_value=0, max_value=1,
                                       initial_value=0,
                                       text_template="graph", length=1)
    color_slider = ui.LineSlider2D(min_value=0.0, max_value=1.0,
                                   initial_value=0,
                                   text_template="{value:.1f}", length=140)
    change_colormap_slider = ui.LineSlider2D(min_value=0, max_value=1.0,
                                             initial_value=0,
                                             text_template="", length=40)
    # we can't have max_value=num_iteration because
    # list_x_file[num_iteration] would raise an IndexError
    change_iteration_slider = ui.LineSlider2D(min_value=0,
                                              max_value=num_iteration - 0.01,
                                              initial_value=0,
                                              text_template=list_x_file[0],
                                              length=140)
    remove_big_weights_slider = ui.LineSlider2D(min_value=0,
                                                max_value=max_weight,
                                                initial_value=max_weight,
                                                text_template="{value:.2f}",
                                                length=140)
    remove_small_weights_slider = ui.LineSlider2D(min_value=0,
                                                  max_value=max_weight,
                                                  initial_value=0,
                                                  text_template="{value:.2f}",
                                                  length=140)
    opacity_slider = ui.LineSlider2D(min_value=0.0, max_value=1.0,
                                     initial_value=0.5,
                                     text_template="{ratio:.0%}", length=140)

    save_one_image_bouton.add_callback(save_one_image_bouton.slider_disk,
                                       "LeftButtonPressEvent", save_one_image)
    color_slider.add_callback(color_slider.slider_disk,
                              "MouseMoveEvent", change_streamlines_color)
    color_slider.add_callback(color_slider.slider_line,
                              "LeftButtonPressEvent", change_streamlines_color)
    add_graph_bouton.add_callback(add_graph_bouton.slider_disk,
                                  "LeftButtonPressEvent", add_graph)
    change_colormap_slider.add_callback(change_colormap_slider.slider_disk,
                                        "MouseMoveEvent", change_colormap)
    change_colormap_slider.add_callback(change_colormap_slider.slider_line,
                                        "LeftButtonPressEvent",
                                        change_colormap)
    change_iteration_slider.add_callback(change_iteration_slider.slider_disk,
                                         "MouseMoveEvent", change_iteration)
    change_iteration_slider.add_callback(change_iteration_slider.slider_line,
                                         "LeftButtonPressEvent",
                                         change_iteration)
    remove_big_weights_slider.add_callback(
        remove_big_weights_slider.slider_disk,
        "MouseMoveEvent", remove_big_weight)
    remove_big_weights_slider.add_callback(
        remove_big_weights_slider.slider_line,
        "LeftButtonPressEvent", remove_big_weight)
    remove_small_weights_slider.add_callback(
        remove_small_weights_slider.slider_disk,
        "MouseMoveEvent", remove_small_weight)
    remove_small_weights_slider.add_callback(
        remove_small_weights_slider.slider_line,
        "LeftButtonPressEvent", remove_small_weight)
    opacity_slider.add_callback(opacity_slider.slider_disk,
                                "MouseMoveEvent", change_opacity)
    opacity_slider.add_callback(opacity_slider.slider_line,
                                "LeftButtonPressEvent", change_opacity)

    color_slider_label = ui.TextBlock2D()
    color_slider_label.message = 'color of streamlines'
    change_colormap_slider_label_weight = ui.TextBlock2D()
    change_colormap_slider_label_weight.message = 'weight color'
    change_colormap_slider_label_direction = ui.TextBlock2D()
    change_colormap_slider_label_direction.message = 'direction color'
    change_iteration_slider_label = ui.TextBlock2D()
    change_iteration_slider_label.message = 'number of the iteration'
    remove_big_weights_slider_label = ui.TextBlock2D()
    remove_big_weights_slider_label.message = 'big weights subdued'
    remove_small_weights_slider_label = ui.TextBlock2D()
    remove_small_weights_slider_label.message = 'small weights subdued'
    opacity_slider_label = ui.TextBlock2D()
    opacity_slider_label.message = 'Unwanted weights opacity'
    numbers_of_streamlines_in_interval = ui.TextBlock2D()
    numbers_of_streamlines_in_interval.message = \
        "Number of streamlines in interval: " + str(num_streamlines)

    panel = ui.Panel2D(center=(300, 160), size=(500, 280),
                       color=(1, 1, 1), opacity=0.1, align="right")
    panel.add_element(save_one_image_bouton, 'relative', (0.9, 0.9))
    panel.add_element(add_graph_bouton, 'relative', (0.9, 0.77))
    panel.add_element(color_slider_label, 'relative', (0.05, 0.85))
    panel.add_element(color_slider, 'relative', (0.7, 0.9))
    panel.add_element(numbers_of_streamlines_in_interval, 'relative',
                      (0.05, 0.72))
    panel.add_element(change_colormap_slider_label_weight, 'relative',
                      (0.05, 0.59))
    panel.add_element(change_colormap_slider_label_direction, 'relative',
                      (0.5, 0.59))
    panel.add_element(change_colormap_slider, 'relative', (0.4, 0.64))
    panel.add_element(change_iteration_slider_label, 'relative', (0.05, 0.46))
    panel.add_element(change_iteration_slider, 'relative', (0.7, 0.51))
    panel.add_element(remove_big_weights_slider_label, 'relative',
                      (0.05, 0.33))
    panel.add_element(remove_big_weights_slider, 'relative', (0.7, 0.37))
    panel.add_element(remove_small_weights_slider_label, 'relative',
                      (0.05, 0.2))
    panel.add_element(remove_small_weights_slider, 'relative', (0.7, 0.24))
    panel.add_element(opacity_slider_label, 'relative', (0.05, 0.07))
    panel.add_element(opacity_slider, 'relative', (0.7, 0.11))
    panel.add_to_renderer(renderer)

    renderer.reset_clipping_range()
    show_m.render()
    show_m.start()
long_streamlines.append(sl) """ Now we calculate the Cluster Confidence Index using the corpus callosum streamline bundle and visualize them. """ cci = cluster_confidence(long_streamlines) # Visualize the streamlines, colored by cci ren = window.Renderer() hue = [0.5, 1] saturation = [0.0, 1.0] lut_cmap = actor.colormap_lookup_table(scale_range=(cci.min(), cci.max() / 4), hue_range=hue, saturation_range=saturation) bar3 = actor.scalar_bar(lut_cmap) ren.add(bar3) stream_actor = actor.line(long_streamlines, cci, linewidth=0.1, lookup_colormap=lut_cmap) ren.add(stream_actor) """ If you set interactive to True (below), the rendering will pop up in an interactive window. """
:align: center Every point with a color from FA. Show every point with a value from a volume with your colormap ============================================================== Here we will need to input the ``fa`` map in ``streamtube`` """ scene.clear() hue = (0.0, 0.0) # red only saturation = (0.0, 1.0) # white to red lut_cmap = actor.colormap_lookup_table(hue_range=hue, saturation_range=saturation) stream_actor3 = actor.line(bundle_native, fa, linewidth=0.1, lookup_colormap=lut_cmap) bar2 = actor.scalar_bar(lut_cmap) scene.add(stream_actor3) scene.add(bar2) # window.show(scene, size=(600, 600), reset_camera=False) window.record(scene, out_path='bundle3.png', size=(600, 600)) """ .. figure:: bundle3.png :align: center
""" fname_fa = os.path.join(os.path.expanduser('~'), '.dipy', 'exp_bundles_and_maps', 'bundles_2_subjects', 'subj_1', 'fa_1x1x1.nii.gz') img = nib.load(fname_fa) fa = img.get_data() """ Notice here how the scale range is (0, 255) and not (0, 1) which is the usual range of FA values. """ lut = actor.colormap_lookup_table(scale_range=(0, 255), hue_range=(0.4, 1.), saturation_range=(1, 1.), value_range=(0., 1.)) """ This is because the lookup table is applied in the slice after interpolating to (0, 255). """ fa_actor = actor.slicer(fa, affine, lookup_colormap=lut) renderer.clear() renderer.add(fa_actor) renderer.reset_camera() renderer.zoom(1.4)
def main():
    """Interactively explore TMS-induced effects along tractography streamlines.

    Prompts the user (via ``input``) for a .trk tractography file, a
    T1fs_conform image, an FA image, a SimNIBS head mesh and an output
    directory; co-registers T1 and FA in four stages (center of mass,
    translation, rigid, affine), transforms the streamlines into T1 space,
    and opens an interactive window where clicking a voxel moves a torus
    "coil" and recolors the streamlines via ``change_TMS_effects``.

    Side effects: sets many module-level globals (``mesh_path``,
    ``subject_name``, ``out_dir``, ``bundle_native``, ``colors``, ...)
    consumed by callbacks defined elsewhere in the file.
    """
    # reads the tractography data in trk format
    # extracts streamlines and the file header. Streamlines should be in the
    # same coordinate system as the FA map (used later).
    # input example: '/home/Example_data/tracts.trk'
    tractography_file = input(
        "Please, specify the file with tracts that you would like to analyse. File should be in the trk format. "
    )

    streams, hdr = load_trk(tractography_file)  # for old DIPY version
    # sft = load_trk(tractography_file, tractography_file)
    # streams = sft.streamlines
    # NOTE(review): np.asarray on a list of variable-length streamlines
    # builds a ragged object array — deprecated in recent NumPy; confirm.
    streams_array = np.asarray(streams)
    print('imported tractography data:' + tractography_file)

    # load T1fs_conform image that operates in the same coordinates as simnibs
    # except for the fact the center of mesh is located at the image center
    # T1fs_conform image should be generated in advance during the head
    # meshing procedure
    # input example: fname_T1='/home/Example_data/T1fs_conform.nii.gz'
    fname_T1 = input(
        "Please, specify the T1fs_conform image that has been generated during head meshing procedure. "
    )
    data_T1, affine_T1 = load_nifti(fname_T1)

    # load FA image in the same coordinates as tracts
    # input example:fname_FA='/home/Example_data/DTI_FA.nii'
    fname_FA = input("Please, specify the FA image. ")
    data_FA, affine_FA = load_nifti(fname_FA)

    print('loaded T1fs_conform.nii and FA images')

    # specify the head mesh file that is used later in simnibs to simulate
    # induced electric field
    # input example:'/home/Example_data/SUBJECT_MESH.msh'
    global mesh_path
    mesh_path = input("Please, specify the head mesh file. ")

    # index just past the last '/' — start of the mesh file's basename
    last_slach = max([i for i, ltr in enumerate(mesh_path) if ltr == '/']) + 1
    global subject_name
    # basename without the 4-character '.msh' extension
    subject_name = mesh_path[last_slach:-4]

    # specify the directory where you would like to save your simulation
    # results
    # input example:'/home/Example_data/Output'
    global out_dir
    out_dir = input(
        "Please, specify the directory where you would like to save your simulation results. "
    )
    out_dir = out_dir + '/simulation_at_pos_'

    # Co-registration of T1fs_conform and FA images. Performed in 4 steps.
    # Step 1. Calculation of the center of mass transform. Used later as
    # starting transform.
    c_of_mass = transform_centers_of_mass(data_T1, affine_T1,
                                          data_FA, affine_FA)
    print('calculated c_of_mass transformation')

    # Step 2. Calculation of a 3D translation transform. Used in the next
    # step as starting transform.
    nbins = 32
    sampling_prop = None  # None = use all voxels for the metric
    metric = MutualInformationMetric(nbins, sampling_prop)

    level_iters = [10000, 1000, 100]  # coarse-to-fine iteration schedule
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    affreg = AffineRegistration(metric=metric, level_iters=level_iters,
                                sigmas=sigmas, factors=factors)

    transform = TranslationTransform3D()
    params0 = None
    starting_affine = c_of_mass.affine
    translation = affreg.optimize(data_T1, data_FA, transform, params0,
                                  affine_T1, affine_FA,
                                  starting_affine=starting_affine)
    print('calculated 3D translation transform')

    # Step 3. Calculation of a Rigid 3D transform. Used in the next step as
    # starting transform
    transform = RigidTransform3D()
    params0 = None
    starting_affine = translation.affine
    rigid = affreg.optimize(data_T1, data_FA, transform, params0,
                            affine_T1, affine_FA,
                            starting_affine=starting_affine)
    print('calculated Rigid 3D transform')

    # Step 4. Calculation of an affine transform. Used for co-registration
    # of T1 and FA images.
    transform = AffineTransform3D()
    params0 = None
    starting_affine = rigid.affine
    affine = affreg.optimize(data_T1, data_FA, transform, params0,
                             affine_T1, affine_FA,
                             starting_affine=starting_affine)
    print('calculated Affine 3D transform')

    identity = np.eye(4)

    inv_affine_FA = np.linalg.inv(affine_FA)
    inv_affine_T1 = np.linalg.inv(affine_T1)
    inv_affine = np.linalg.inv(affine.affine)

    # transforming streamlines to FA space
    new_streams_FA = streamline.transform_streamlines(streams, inv_affine_FA)
    new_streams_FA_array = np.asarray(new_streams_FA)

    # voxel-space chain: T1 -> world (affine_T1) -> registered -> FA voxels
    T1_to_FA = np.dot(inv_affine_FA, np.dot(affine.affine, affine_T1))
    FA_to_T1 = np.linalg.inv(T1_to_FA)

    # transforming streamlines from FA to T1 space
    new_streams_T1 = streamline.transform_streamlines(new_streams_FA,
                                                      FA_to_T1)
    global new_streams_T1_array
    new_streams_T1_array = np.asarray(new_streams_T1)

    # calculating amline derivatives along the streamlines to get the local
    # orientation of the streamlines
    global streams_array_derivative
    streams_array_derivative = copy.deepcopy(new_streams_T1_array)

    print('calculating amline derivatives')
    for stream in range(len(new_streams_T1_array)):
        my_steam = new_streams_T1_array[stream]
        for t in range(len(my_steam[:, 0])):
            # unit tangent vector at point t via per-axis derivatives
            streams_array_derivative[stream][t, 0] = my_deriv(t,
                                                              my_steam[:, 0])
            streams_array_derivative[stream][t, 1] = my_deriv(t,
                                                              my_steam[:, 1])
            streams_array_derivative[stream][t, 2] = my_deriv(t,
                                                              my_steam[:, 2])
            deriv_norm = np.linalg.norm(streams_array_derivative[stream][t, :])
            streams_array_derivative[stream][t, :] = \
                streams_array_derivative[stream][t, :] / deriv_norm

    # to create a torus representing a coil in an interactive window
    torus = vtk.vtkParametricTorus()
    torus.SetRingRadius(5)
    torus.SetCrossSectionRadius(2)

    torusSource = vtk.vtkParametricFunctionSource()
    torusSource.SetParametricFunction(torus)
    torusSource.SetScalarModeToPhase()

    torusMapper = vtk.vtkPolyDataMapper()
    torusMapper.SetInputConnection(torusSource.GetOutputPort())
    torusMapper.SetScalarRange(0, 360)

    torusActor = vtk.vtkActor()
    torusActor.SetMapper(torusMapper)

    # initial coil position (voxel coordinates in T1 space)
    torus_pos_x = 100
    torus_pos_y = 129
    torus_pos_z = 211
    torusActor.SetPosition(torus_pos_x, torus_pos_y, torus_pos_z)

    list_streams_T1 = list(new_streams_T1)
    # adding one fictive bundle of length 1 with coordinates [0,0,0] to avoid
    # some bugs with actor.line during visualization
    list_streams_T1.append(np.array([0, 0, 0]))

    global bundle_native
    bundle_native = list_streams_T1

    # generating a list of colors to visualize later the stimualtion effects
    effect_max = 0.100
    effect_min = -0.100
    global colors
    colors = [
        np.random.rand(*current_streamline.shape)
        for current_streamline in bundle_native
    ]

    # initialize every real streamline to the mid-scale 'jet' color
    for my_streamline in range(len(bundle_native) - 1):
        my_stream = copy.deepcopy(bundle_native[my_streamline])
        for point in range(len(my_stream)):
            colors[my_streamline][point] = vtkplotter.colors.colorMap(
                (effect_min + effect_max) / 2, name='jet',
                vmin=effect_min, vmax=effect_max)
    # the fictive bundle (last entry) gets the bottom-of-scale color
    colors[my_streamline + 1] = vtkplotter.colors.colorMap(
        effect_min, name='jet', vmin=effect_min, vmax=effect_max)

    # Vizualization of fibers over T1
    # i_coord = 0
    # j_coord = 0
    # k_coord = 0
    # global number_of_stimulations
    number_of_stimulations = 0

    actor_line_list = []

    scene = window.Scene()
    scene.clear()
    scene.background((0.5, 0.5, 0.5))

    world_coords = False
    shape = data_T1.shape

    lut = actor.colormap_lookup_table(scale_range=(effect_min, effect_max),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.))

    # # the lines below is for a non-interactive demonstration run only.
    # # they should remain commented unless you set "interactive" to False
    # lut, colors = change_TMS_effects(torus_pos_x, torus_pos_y, torus_pos_z)
    # bar = actor.scalar_bar(lut)
    # bar.SetTitle("TMS effect")
    # bar.SetHeight(0.3)
    # bar.SetWidth(0.10)
    # bar.SetPosition(0.85, 0.3)
    # scene.add(bar)

    actor_line_list.append(
        actor.line(bundle_native,
                   colors,
                   linewidth=5,
                   fake_tube=True,
                   lookup_colormap=lut))

    # both branches are identical today; kept for the world_coords toggle
    if not world_coords:
        image_actor_z = actor.slicer(data_T1, identity)
    else:
        image_actor_z = actor.slicer(data_T1, identity)

    slicer_opacity = 0.6
    image_actor_z.opacity(slicer_opacity)

    image_actor_x = image_actor_z.copy()
    x_midpoint = int(np.round(shape[0] / 2))
    image_actor_x.display_extent(x_midpoint, x_midpoint, 0, shape[1] - 1, 0,
                                 shape[2] - 1)

    image_actor_y = image_actor_z.copy()
    y_midpoint = int(np.round(shape[1] / 2))
    image_actor_y.display_extent(0, shape[0] - 1, y_midpoint, y_midpoint, 0,
                                 shape[2] - 1)

    """
    Connect the actors with the scene.
    """
    scene.add(actor_line_list[0])
    scene.add(image_actor_z)
    scene.add(image_actor_x)
    scene.add(image_actor_y)

    show_m = window.ShowManager(scene, size=(1200, 900))
    show_m.initialize()

    """
    Create sliders to move the slices and change their opacity.
    """
    line_slider_z = ui.LineSlider2D(min_value=0,
                                    max_value=shape[2] - 1,
                                    initial_value=shape[2] / 2,
                                    text_template="{value:.0f}",
                                    length=140)
    line_slider_x = ui.LineSlider2D(min_value=0,
                                    max_value=shape[0] - 1,
                                    initial_value=shape[0] / 2,
                                    text_template="{value:.0f}",
                                    length=140)
    line_slider_y = ui.LineSlider2D(min_value=0,
                                    max_value=shape[1] - 1,
                                    initial_value=shape[1] / 2,
                                    text_template="{value:.0f}",
                                    length=140)
    opacity_slider = ui.LineSlider2D(min_value=0.0,
                                     max_value=1.0,
                                     initial_value=slicer_opacity,
                                     length=140)

    """
    Callbacks for the sliders.
    """
    def change_slice_z(slider):
        # move the axial slice to the slider's (rounded) value
        z = int(np.round(slider.value))
        image_actor_z.display_extent(0, shape[0] - 1, 0, shape[1] - 1, z, z)

    def change_slice_x(slider):
        # move the sagittal slice
        x = int(np.round(slider.value))
        image_actor_x.display_extent(x, x, 0, shape[1] - 1, 0, shape[2] - 1)

    def change_slice_y(slider):
        # move the coronal slice
        y = int(np.round(slider.value))
        image_actor_y.display_extent(0, shape[0] - 1, y, y, 0, shape[2] - 1)

    def change_opacity(slider):
        # apply one opacity value to all three slice actors
        slicer_opacity = slider.value
        image_actor_z.opacity(slicer_opacity)
        image_actor_x.opacity(slicer_opacity)
        image_actor_y.opacity(slicer_opacity)

    line_slider_z.on_change = change_slice_z
    line_slider_x.on_change = change_slice_x
    line_slider_y.on_change = change_slice_y
    opacity_slider.on_change = change_opacity

    """
    Create text labels to identify the sliders.
    """
    def build_label(text):
        # uniform styling for all slider labels
        label = ui.TextBlock2D()
        label.message = text
        label.font_size = 18
        label.font_family = 'Arial'
        label.justification = 'left'
        label.bold = False
        label.italic = False
        label.shadow = False
        label.background = (0, 0, 0)
        label.color = (1, 1, 1)
        return label

    line_slider_label_z = build_label(text="Z Slice")
    line_slider_label_x = build_label(text="X Slice")
    line_slider_label_y = build_label(text="Y Slice")
    opacity_slider_label = build_label(text="Opacity")

    """
    Create a ``panel`` to contain the sliders and labels.
    """
    panel = ui.Panel2D(size=(300, 200),
                       color=(1, 1, 1),
                       opacity=0.1,
                       align="right")
    panel.center = (1030, 120)

    panel.add_element(line_slider_label_x, (0.1, 0.75))
    panel.add_element(line_slider_x, (0.38, 0.75))
    panel.add_element(line_slider_label_y, (0.1, 0.55))
    panel.add_element(line_slider_y, (0.38, 0.55))
    panel.add_element(line_slider_label_z, (0.1, 0.35))
    panel.add_element(line_slider_z, (0.38, 0.35))
    panel.add_element(opacity_slider_label, (0.1, 0.15))
    panel.add_element(opacity_slider, (0.38, 0.15))

    scene.add(panel)

    """
    Create a ``panel`` to show the value of a picked voxel.
    """
    label_position = ui.TextBlock2D(text='Position:')
    label_value = ui.TextBlock2D(text='Value:')

    result_position = ui.TextBlock2D(text='')
    result_value = ui.TextBlock2D(text='')

    text2 = ui.TextBlock2D(text='Calculate')

    panel_picking = ui.Panel2D(size=(250, 125),
                               color=(1, 1, 1),
                               opacity=0.1,
                               align="left")
    panel_picking.center = (200, 120)

    panel_picking.add_element(label_position, (0.1, 0.75))
    panel_picking.add_element(label_value, (0.1, 0.45))

    panel_picking.add_element(result_position, (0.45, 0.75))
    panel_picking.add_element(result_value, (0.45, 0.45))

    panel_picking.add_element(text2, (0.1, 0.15))

    icon_files = []
    icon_files.append(('left', read_viz_icons(fname='circle-left.png')))
    button_example = ui.Button2D(icon_fnames=icon_files, size=(100, 30))
    panel_picking.add_element(button_example, (0.5, 0.1))

    def change_text_callback(i_ren, obj, button):
        # recompute the TMS effect at the last picked voxel and swap the
        # streamline actor for a freshly colored one
        text2.message = str(i_coord) + ' ' + str(j_coord) + ' ' + str(k_coord)
        torusActor.SetPosition(i_coord, j_coord, k_coord)
        print(i_coord, j_coord, k_coord)
        lut, colors = change_TMS_effects(i_coord, j_coord, k_coord)
        scene.rm(actor_line_list[0])
        actor_line_list.append(
            actor.line(bundle_native,
                       colors,
                       linewidth=5,
                       fake_tube=True,
                       lookup_colormap=lut))
        scene.add(actor_line_list[1])

        nonlocal number_of_stimulations
        global bar
        # replace the previous scalar bar (only one should be shown)
        if number_of_stimulations > 0:
            scene.rm(bar)
        else:
            number_of_stimulations = number_of_stimulations + 1
        bar = actor.scalar_bar(lut)
        bar.SetTitle("TMS effect")

        bar.SetHeight(0.3)
        bar.SetWidth(0.10)  # the width is set first
        bar.SetPosition(0.85, 0.3)
        scene.add(bar)

        actor_line_list.pop(0)
        i_ren.force_render()

    button_example.on_left_mouse_button_clicked = change_text_callback

    scene.add(panel_picking)
    scene.add(torusActor)

    def left_click_callback(obj, ev):
        """Get the value of the clicked voxel and show it in the panel."""
        event_pos = show_m.iren.GetEventPosition()

        obj.picker.Pick(event_pos[0], event_pos[1], 0, scene)

        global i_coord, j_coord, k_coord
        i_coord, j_coord, k_coord = obj.picker.GetPointIJK()
        print(i_coord, j_coord, k_coord)
        result_position.message = '({}, {}, {})'.format(str(i_coord),
                                                        str(j_coord),
                                                        str(k_coord))
        result_value.message = '%.8f' % data_T1[i_coord, j_coord, k_coord]
        torusActor.SetPosition(i_coord, j_coord, k_coord)

    image_actor_z.AddObserver('LeftButtonPressEvent', left_click_callback,
                              1.0)

    global size
    size = scene.GetSize()

    def win_callback(obj, event):
        # keep the slider panel anchored when the window is resized
        global size
        if size != obj.GetSize():
            size_old = size
            size = obj.GetSize()
            size_change = [size[0] - size_old[0], 0]
            panel.re_align(size_change)

    show_m.initialize()

    """
    Set the following variable to ``True`` to interact with the datasets in 3D.
    """
    interactive = True

    scene.zoom(2.0)
    scene.reset_clipping_range()
    scene.set_camera(position=(-642.07, 495.40, 148.49),
                     focal_point=(127.50, 127.50, 127.50),
                     view_up=(0.02, -0.01, 1.00))

    if interactive:
        show_m.add_window_callback(win_callback)
        show_m.render()
        show_m.start()
    else:
        window.record(scene,
                      out_path=out_dir + '/bundles_and_effects.png',
                      size=(1200, 900),
                      reset_camera=True)
# %% #color by line-average fa group = 2 if group == 1: Nativegroupstreamlines = Nativegroupstreamlines1 groupLinesFA = groupLinesFA1 name = 'Group_Young' else: Nativegroupstreamlines = Nativegroupstreamlines2 groupLinesFA = groupLinesFA2 name = 'Group_Old' cmap = actor.colormap_lookup_table(scale_range=(np.min(groupLinesFA), np.max(groupLinesFA))) renderer = window.Renderer() stream_actor = actor.line(Nativegroupstreamlines, np.array(groupLinesFA), lookup_colormap=cmap) fa_actor = actor.slicer(fa_control, np.eye(4)) renderer.add(stream_actor) renderer.add(fa_actor) bar = actor.scalar_bar(cmap) renderer.add(bar) # Uncomment the line below to show to display the window window.show(renderer, size=(600, 600), reset_camera=False) window.record(renderer, size=(600, 600), out_path=outpath + '/' + str(target_l) + '--' + str(target_r) +