def main():
    """Visualize a TRK tractogram together with its seed points.

    Loads the tractogram, verifies it is a TRK file that carries
    per-streamline 'seeds' data, renders the streamlines and seeds,
    and optionally records a screenshot before showing the window.
    """
    parser = _build_args_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.tractogram])
    assert_outputs_exist(parser, args, [], [args.save])

    tracts_format = detect_format(args.tractogram)
    if tracts_format is not TrkFile:
        # BUG FIX: the original formatted args.tractogram_filename, an
        # attribute that does not exist (the argument is 'tractogram'),
        # so the error path itself raised AttributeError.
        raise ValueError("Invalid input streamline file format " +
                         "(must be trk): {0}".format(args.tractogram))

    # Load files and data
    trk = TrkFile.load(args.tractogram)
    tractogram = trk.tractogram
    streamlines = tractogram.streamlines
    if 'seeds' not in tractogram.data_per_streamline:
        parser.error('Tractogram does not contain seeds')
    seeds = tractogram.data_per_streamline['seeds']

    # Make display objects
    streamlines_actor = actor.line(streamlines)
    points = actor.dots(seeds, color=(1., 1., 1.))

    # Add display objects to canvas
    r = window.Renderer()
    r.add(streamlines_actor)
    r.add(points)

    # Show and record if needed
    if args.save is not None:
        window.record(r, out_path=args.save, size=(1000, 1000))
    window.show(r)
def main():
    """Visualize a TRK tractogram together with its seed points.

    Loads the tractogram (TRK files may use 'same' as their reference),
    converts to voxel space so streamlines line up with the VOX-space
    seeds, renders both, and optionally records a screenshot.
    """
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.tractogram])
    assert_outputs_exist(parser, args, [], [args.save])

    tracts_format = detect_format(args.tractogram)
    if tracts_format is not TrkFile:
        # BUG FIX: the original formatted args.tractogram_filename, an
        # attribute that does not exist (the argument is 'tractogram'),
        # so the error path itself raised AttributeError.
        raise ValueError("Invalid input streamline file format " +
                         "(must be trk): {0}".format(args.tractogram))

    # Load files and data. TRKs can have 'same' as reference
    tractogram = load_tractogram(args.tractogram, 'same')
    # Streamlines are saved in RASMM but seeds are saved in VOX
    # This might produce weird behavior with non-iso
    tractogram.to_vox()

    streamlines = tractogram.streamlines
    if 'seeds' not in tractogram.data_per_streamline:
        parser.error('Tractogram does not contain seeds')
    seeds = tractogram.data_per_streamline['seeds']

    # Make display objects
    streamlines_actor = actor.line(streamlines)
    points = actor.dots(seeds, color=(1., 1., 1.))

    # Add display objects to canvas
    s = window.Scene()
    s.add(streamlines_actor)
    s.add(points)

    # Show and record if needed
    if args.save is not None:
        window.record(s, out_path=args.save, size=(1000, 1000))
    window.show(s)
def show_template_bundles(final_streamlines, template_path, fname):
    """Render streamlines over a template ROI contour and save a snapshot.

    Parameters
    ----------
    final_streamlines : streamlines-like
        Streamlines to display as tubes.
    template_path : str
        Path to the template NIfTI image used as an ROI contour.
    fname : str
        Output image path for the recorded frame.
    """
    import nibabel as nib
    from fury import actor, window

    # window.Renderer is deprecated in fury in favor of window.Scene.
    scene = window.Scene()
    # get_data() was deprecated in nibabel; get_fdata() is the replacement
    # (the subsequent astype('bool') keeps the original mask semantics).
    template_img_data = nib.load(template_path).get_fdata().astype('bool')
    template_actor = actor.contour_from_roi(template_img_data,
                                            color=(50, 50, 50), opacity=0.05)
    scene.add(template_actor)
    lines_actor = actor.streamtube(final_streamlines, window.colors.orange,
                                   linewidth=0.3)
    scene.add(lines_actor)
    window.record(scene, n_frames=1, out_path=fname, size=(900, 900))
    return
def show_model_reco_bundles(model, recognized_bundle, folder_name,
                            file_bundle_name, interactive=True):
    """Display a model bundle (green) and a recognized bundle (blue).

    Parameters
    ----------
    model : streamlines-like
        The model bundle.
    recognized_bundle : streamlines-like
        The bundle recognized against the model.
    folder_name : str
        Directory where the snapshot is written.
    file_bundle_name : str
        Base name of the snapshot file ('.png' is appended).
    interactive : bool, optional
        When True, open an interactive window before recording.
    """
    ren = window.Scene()
    ren.SetBackground(1, 1, 1)
    ren.add(actor.line(model, colors=(.1, .7, .26)))  # green
    # BUG FIX: the blue component was written as 6, outside the valid
    # [0, 1] color range; the comment says "blue", so .6 was intended.
    ren.add(actor.line(recognized_bundle, colors=(.1, .1, .6)))  # blue
    if interactive:
        window.show(ren)
    # NOTE(review): camera_info() prints the camera state and returns None,
    # so this call passes None to set_camera — effectively a no-op; confirm
    # whether an explicit camera was intended here.
    ren.set_camera(ren.camera_info())
    window.record(ren, out_path=pjoin(folder_name, file_bundle_name) + '.png',
                  size=(600, 600))
def save_views_imgs(lines, size=(500, 500), interactive=False, ext='jpg'):
    """Render *lines* from each predefined viewpoint and save one image per view.

    :param lines: Streamlines-like object to display.
    :param size: A 2-tuple, (width, height) of the render window in pixels.
    :param interactive: (Boolean) If True, launches a window for each view.
        Useful for developing/debugging options.
    :param ext: (String) Extension of the output image files.
    """
    # Spin up a virtual framebuffer when xvfbwrapper is available
    # (needed for off-screen rendering on headless machines).
    if has_xvfbwrapper:
        print('Starting Xvfb')
        virtual_display = Xvfb()
        virtual_display.start()

    # Build the scene once; only the camera changes between views.
    scene = window.Scene()
    scene.add(actor.line(lines))

    for view in _VIEW_PARAMS:
        scene.set_camera(position=view['cam_pos'],
                         focal_point=view['focal_pnt'],
                         view_up=view['view_up'])

        if interactive:
            window.show(scene, size=size)

        # One image per view, saved under the 'secondary' directory.
        target = os.path.join('secondary', view['view'] + '.' + ext)
        print('Saving: {}'.format(target))
        window.record(scene, out_path=target, size=size)

    # Tear down the virtual framebuffer if one was started.
    if has_xvfbwrapper:
        virtual_display.stop()
def show_both_bundles(bundles, colors=None, show=True, fname=None):
    """Display the given bundles together, one color per bundle.

    Parameters
    ----------
    bundles : sequence
        Streamline bundles to draw.
    colors : list, optional
        One color per bundle; defaults to orange and red.
    show : bool, optional
        Open an interactive window when True.
    fname : str, optional
        When given, a snapshot is recorded to this path.
    """
    if colors is None:
        colors = [window.colors.orange, window.colors.red]

    scene = window.Scene()
    for idx, bundle in enumerate(bundles):
        scene.add(actor.line(bundle, colors[idx], linewidth=0.3))

    if show:
        window.show(scene)

    if fname is not None:
        sleep(1)  # let the render settle before recording
        window.record(scene, n_frames=1, out_path=fname, size=(900, 900))
def show_template_bundles(final_streamlines, template_path, fname):
    """Displays the template bundles

    Parameters
    ----------
    final_streamlines : list
        Generated streamlines
    template_path : str
        Path to reference FA nii.gz file
    fname : str
        Path of the output image file
    """
    # window.Renderer is deprecated in fury in favor of window.Scene.
    scene = window.Scene()
    # get_data() was deprecated in nibabel; get_fdata() is the replacement
    # (the subsequent astype("bool") keeps the original mask semantics).
    template_img_data = nib.load(template_path).get_fdata().astype("bool")
    template_actor = actor.contour_from_roi(
        template_img_data, color=(50, 50, 50), opacity=0.05
    )
    scene.add(template_actor)
    lines_actor = actor.streamtube(
        final_streamlines, window.colors.orange, linewidth=0.3
    )
    scene.add(lines_actor)
    window.record(scene, n_frames=1, out_path=fname, size=(900, 900))
sft_target = load_trk(target_file, "same", bbox_valid_check=False) target = sft_target.streamlines target_header = create_tractogram_header(atlas_file, *sft_atlas.space_attribute) """ let's visualize atlas tractogram and target tractogram before registration """ interactive = False ren = window.Renderer() ren.SetBackground(1, 1, 1) ren.add(actor.line(atlas, colors=(1, 0, 1))) ren.add(actor.line(target, colors=(1, 1, 0))) window.record(ren, out_path='tractograms_initial.png', size=(600, 600)) if interactive: window.show(ren) """ .. figure:: tractograms_initial.png :align: center Atlas and target before registration. """ """ We will register target tractogram to model atlas' space using streamlinear registeration (SLR) [Garyfallidis15]_ """ moved, transform, qb_centroids1, qb_centroids2 = whole_brain_slr(
scene.add(slice_actor2) scene.reset_camera() scene.zoom(1.4) ############################################################################### # In order to interact with the data you will need to uncomment the line below. # window.show(scene, size=(600, 600), reset_camera=False) ############################################################################### # Otherwise, you can save a screenshot using the following command. window.record(scene, out_path='slices.png', size=(600, 600), reset_camera=False) ############################################################################### # Render slices from FA with your colormap # ======================================== # It is also possible to set the colormap of your preference. Here we are # loading an FA image and showing it in a non-standard way using an HSV # colormap. fname_fa = os.path.join(os.path.expanduser('~'), '.dipy', 'exp_bundles_and_maps', 'bundles_2_subjects', 'subj_1', 'fa_1x1x1.nii.gz') img = nib.load(fname_fa)
text += 'Actor ID ' + str(id(picked_info['actor'])) text_block.message = text showm.render() ############################################################################### # Bind the callback to the actor fury_actor.AddObserver('LeftButtonPressEvent', left_click_callback, 1) ############################################################################### # Make the window appear showm = window.ShowManager(scene, size=(1024, 768), order_transparent=True) showm.initialize() scene.add(panel) ############################################################################### # Change interactive to True to start interacting with the scene interactive = False if interactive: showm.start() ############################################################################### # Save the current framebuffer in a PNG file window.record(showm.scene, size=(1024, 768), out_path="viz_picking.png")
# # This is the default option when you are using ``line`` or ``streamtube``. renderer = window.Renderer() stream_actor = actor.line(bundle_native) renderer.set_camera(position=(-176.42, 118.52, 128.20), focal_point=(113.30, 128.31, 76.56), view_up=(0.18, 0.00, 0.98)) renderer.add(stream_actor) # Uncomment the line below to show to display the window # window.show(renderer, size=(600, 600), reset_camera=False) window.record(renderer, out_path='bundle1.png', size=(600, 600)) ############################################################################### # You may wonder how we knew how to set the camera. This is very easy. You just # need to run ``window.show`` once see how you want to see the object and then # close the window and call the ``camera_info`` method which prints the # position, focal point and view up vectors of the camera. renderer.camera_info() ############################################################################### # Show every point with a value from a volume with default colormap # ================================================================= # # Here we will need to input the ``fa`` map in ``streamtube`` or ``line``.
# Create a scene to start. scene = window.Scene() ############################################################################## # Load an image (png, bmp, jpeg or jpg) using ``io.load_image``. In this # example, we will use ``read_viz_textures`` to access an image of the # Earth's surface from the fury Github after using ''fetch_viz_textures()'' # to download the available textures. fetch_viz_textures() filename = read_viz_textures("1_earth_8k.jpg") image = io.load_image(filename) ############################################################################## # Next, use ``actor.texture_on_sphere`` to add a sphere with the texture from # your loaded image to the already existing scene. # To add a texture to your scene as visualized on a plane, use # ``actor.texture`` instead. scene.add(actor.texture_on_sphere(image)) ############################################################################## # Lastly, record the scene, or set interactive to True if you would like to # manipulate your new sphere. interactive = False if interactive: window.show(scene, size=(600, 600), reset_camera=False) window.record(scene, size=(900, 768), out_path="viz_texture.png")
############################################################################### # Then we can draw a solid circle, or disk. disk = ui.Disk2D(outer_radius=50, center=(400, 200), color=(1, 1, 0)) ############################################################################### # Add an inner radius to make a ring. ring = ui.Disk2D(outer_radius=50, inner_radius=45, center=(500, 600), color=(0, 1, 1)) ############################################################################### # Now that all the elements have been initialised, we add them to the show # manager. current_size = (800, 800) show_manager = window.ShowManager(size=current_size, title="FURY Shapes Example") show_manager.scene.add(rect) show_manager.scene.add(disk) show_manager.scene.add(ring) interactive = False if interactive: show_manager.start() window.record(show_manager.scene, size=current_size, out_path="viz_shapes.png")
p.applyExternalForce(blue_ball, -1, forceObj=[40000, 0, 0], posObj=blue_pos, flags=p.WORLD_FRAME) apply_force = 0 sync_actor(blue_ball_actor, blue_ball) sync_actor(red_ball_actor, red_ball) # Get various collision information using `p.getContactPoints`. contact = p.getContactPoints(red_ball, blue_ball, -1, -1) if len(contact) != 0: tb.message = "Collision!!" p.stepSimulation() if cnt == 50: showm.exit() showm.add_timer_callback(True, duration, timer_callback) interactive = False if interactive: showm.start() window.record(scene, size=(900, 700), out_path="viz_ball_collide.png")
size=(900, 768), reset_camera=False, order_transparent=True) showm.initialize() tb = ui.TextBlock2D(bold=True) # use itertools to avoid global variables counter = itertools.count() def timer_callback(_obj, _event): cnt = next(counter) tb.message = "Let's count up to 100 and exit :" + str(cnt) showm.scene.azimuth(0.05 * cnt) sphere_actor.GetProperty().SetOpacity(cnt / 100.) showm.render() if cnt == 100: showm.exit() scene.add(tb) # Run every 200 milliseconds showm.add_timer_callback(True, 200, timer_callback) showm.start() window.record(showm.scene, size=(900, 768), out_path="viz_timer.png")
############################################################################ # The below arrow actor is generated by repeating the arrow primitive. arrow_actor = actor.arrow(centers, dirs, colors=colors, scales=1.5) ############################################################################ # repeating what we did but this time with random centers, directions, and # colors. cen2 = np.random.rand(5, 3) dir2 = np.random.rand(5, 3) cols2 = np.random.rand(5, 3) arrow_actor2 = actor.arrow(cen2, dir2, colors=cols2, scales=1.5) scene = window.Scene() ############################################################################ # Adding our Arrow actors to scene. scene.add(arrow_actor) scene.add(arrow_actor2) interactive = False if interactive: window.show(scene, size=(600, 600)) window.record(scene, out_path='viz_arrow.png', size=(600, 600))
colors = np.array([0, 0, 1]) ############################################################################ # The below sphere actor is generated by repeating the sphere primitive. prim_sphere_actor = actor.sphere(centers, colors=colors, radii=5) ############################################################################ # This time, we're using vtkSphereSource to generate the sphere actor cen2 = np.add(centers, np.array([12, 0, 0])) cols2 = np.array([1, 0, 0]) vtk_sphere_actor = actor.sphere(cen2, colors=cols2, radii=5, use_primitive=False) scene = window.Scene() ############################################################################ # Adding our sphere actors to scene. scene.add(prim_sphere_actor) scene.add(vtk_sphere_actor) interactive = False if interactive: window.show(scene, size=(600, 600)) window.record(scene, out_path='viz_sphere.png', size=(600, 600))
# ================================== # # Now we create a callback for setting the chosen color. def change_color(combobox): label.color = colors[combobox.selected_text] # `on_change` callback is set to `change_color` method so that # it's called whenever a different option is selected. color_combobox.on_change = change_color ############################################################################### # Show Manager # ================================== # # Now we add label and combobox to the scene. current_size = (400, 400) showm = window.ShowManager(size=current_size, title="ComboBox UI Example") showm.scene.add(label, color_combobox) # To interact with the UI, set interactive = True interactive = False if interactive: showm.start() window.record(showm.scene, out_path="combobox_ui.png", size=(400, 400))
orn = p.getQuaternionFromEuler([0, 0, 0]) p.changeConstraint(root_robe_c, pivot, jointChildFrameOrientation=orn, maxForce=500) # Sync base and chain. sync_actor(base_actor, rope) sync_joints(rope_actor, rope) utils.update_actor(rope_actor) # Simulate a step. p.stepSimulation() # Exit after 2000 steps of simulation. if cnt == 130: showm.exit() # Add the timer callback to showmanager. # Increasing the duration value will slow down the simulation. showm.add_timer_callback(True, 1, timer_callback) interactive = False # start simulation if interactive: showm.start() window.record(scene, size=(900, 768), out_path="viz_chain.png")
# # This is the default option when you are using ``line`` or ``streamtube``. scene = window.Scene() stream_actor = actor.line(bundle_native) scene.set_camera(position=(-176.42, 118.52, 128.20), focal_point=(113.30, 128.31, 76.56), view_up=(0.18, 0.00, 0.98)) scene.add(stream_actor) # Uncomment the line below to show to display the window # window.show(scene, size=(600, 600), reset_camera=False) window.record(scene, out_path='bundle1.png', size=(600, 600)) ############################################################################### # You may wonder how we knew how to set the camera. This is very easy. You just # need to run ``window.show`` once see how you want to see the object and then # close the window and call the ``camera_info`` method which prints the # position, focal point and view up vectors of the camera. scene.camera_info() ############################################################################### # Show every point with a value from a volume with default colormap # ================================================================= # # Here we will need to input the ``fa`` map in ``streamtube`` or ``line``.
surface_opacity = 0.5 surface_color = [0, 1, 1] seedroi_actor = actor.contour_from_roi(seed_mask, affine, surface_color, surface_opacity) ############################################################################### # Next, we initialize a ''Scene'' object and add both actors # to the rendering. scene = window.Scene() scene.add(streamlines_actor) scene.add(seedroi_actor) ############################################################################### # If you uncomment the following line, the rendering will pop up in an # interactive window. interactive = False if interactive: window.show(scene) scene.zoom(1.5) scene.reset_clipping_range() window.record(scene, out_path='contour_from_roi_tutorial.png', size=(1200, 900), reset_camera=False)
size = obj.GetSize() size_change = [size[0] - size_old[0], 0] panel.re_align(size_change) show_m.initialize() ############################################################################### # Finally, please set the following variable to ``True`` to interact with the # datasets in 3D. interactive = False ren.zoom(1.5) ren.reset_clipping_range() if interactive: show_m.add_window_callback(win_callback) show_m.render() show_m.start() else: window.record(ren, out_path='bundles_and_3_slices.png', size=(1200, 900), reset_camera=False) del show_m
[3, 5, 7]], dtype='i8') #good utils.set_polydata_vertices(my_polydata, my_vertices) utils.set_polydata_triangles(my_polydata, my_triangles) file_name = "my_star2D.vtk" save_polydata(my_polydata, file_name) print("Surface saved in " + file_name) star_polydata = load_polydata(file_name) star_vertices = utils.get_polydata_vertices(star_polydata) colors = star_vertices * 255 utils.set_polydata_colors(star_polydata, colors) print("new surface colors") print(utils.get_polydata_colors(star_polydata)) # get vtkActor star_actor = utils.get_actor_from_polydata(star_polydata) star_actor.GetProperty().BackfaceCullingOff() # Create a scene scene = window.Scene() scene.add(star_actor) scene.set_camera(position=(0, 0, 7), focal_point=(0, 0, 0)) scene.zoom(3) # display # window.show(scene, size=(1000, 1000), reset_camera=False) this allows the picture to be moved around window.record(scene, out_path='star2D.png', size=(600, 600))
flags=p.WORLD_FRAME) apply_force = False # Set position and orientation of the ball. sync_actor(ball_actor, ball) # Updating the position and orientation of each individual brick. for idx, brick in enumerate(bricks): sync_brick(idx, brick) utils.update_actor(brick_actor) # Simulate a step. p.stepSimulation() # Exit after 2000 steps of simulation. if cnt == 130: showm.exit() # Add the timer callback to showmanager. # Increasing the duration value will slow down the simulation. showm.add_timer_callback(True, 1, timer_callback) interactive = False # start simulation if interactive: showm.start() window.record(scene, out_path="viz_brick_wall.png", size=(900, 768))
def main(): # reads the tractography data in trk format # extracts streamlines and the file header. Streamlines should be in the same coordinate system as the FA map (used later). # input example: '/home/Example_data/tracts.trk' tractography_file = input( "Please, specify the file with tracts that you would like to analyse. File should be in the trk format. " ) streams, hdr = load_trk(tractography_file) # for old DIPY version # sft = load_trk(tractography_file, tractography_file) # streams = sft.streamlines streams_array = np.asarray(streams) print('imported tractography data:' + tractography_file) # load T1fs_conform image that operates in the same coordinates as simnibs except for the fact the center of mesh # is located at the image center # T1fs_conform image should be generated in advance during the head meshing procedure # input example: fname_T1='/home/Example_data/T1fs_conform.nii.gz' fname_T1 = input( "Please, specify the T1fs_conform image that has been generated during head meshing procedure. " ) data_T1, affine_T1 = load_nifti(fname_T1) # load FA image in the same coordinates as tracts # input example:fname_FA='/home/Example_data/DTI_FA.nii' fname_FA = input("Please, specify the FA image. ") data_FA, affine_FA = load_nifti(fname_FA) print('loaded T1fs_conform.nii and FA images') # specify the head mesh file that is used later in simnibs to simulate induced electric field # input example:'/home/Example_data/SUBJECT_MESH.msh' global mesh_path mesh_path = input("Please, specify the head mesh file. ") last_slach = max([i for i, ltr in enumerate(mesh_path) if ltr == '/']) + 1 global subject_name subject_name = mesh_path[last_slach:-4] # specify the directory where you would like to save your simulation results # input example:'/home/Example_data/Output' global out_dir out_dir = input( "Please, specify the directory where you would like to save your simulation results. 
" ) out_dir = out_dir + '/simulation_at_pos_' # Co-registration of T1fs_conform and FA images. Performed in 4 steps. # Step 1. Calculation of the center of mass transform. Used later as starting transform. c_of_mass = transform_centers_of_mass(data_T1, affine_T1, data_FA, affine_FA) print('calculated c_of_mass transformation') # Step 2. Calculation of a 3D translation transform. Used in the next step as starting transform. nbins = 32 sampling_prop = None metric = MutualInformationMetric(nbins, sampling_prop) level_iters = [10000, 1000, 100] sigmas = [3.0, 1.0, 0.0] factors = [4, 2, 1] affreg = AffineRegistration(metric=metric, level_iters=level_iters, sigmas=sigmas, factors=factors) transform = TranslationTransform3D() params0 = None starting_affine = c_of_mass.affine translation = affreg.optimize(data_T1, data_FA, transform, params0, affine_T1, affine_FA, starting_affine=starting_affine) print('calculated 3D translation transform') # Step 3. Calculation of a Rigid 3D transform. Used in the next step as starting transform transform = RigidTransform3D() params0 = None starting_affine = translation.affine rigid = affreg.optimize(data_T1, data_FA, transform, params0, affine_T1, affine_FA, starting_affine=starting_affine) print('calculated Rigid 3D transform') # Step 4. Calculation of an affine transform. Used for co-registration of T1 and FA images. 
transform = AffineTransform3D() params0 = None starting_affine = rigid.affine affine = affreg.optimize(data_T1, data_FA, transform, params0, affine_T1, affine_FA, starting_affine=starting_affine) print('calculated Affine 3D transform') identity = np.eye(4) inv_affine_FA = np.linalg.inv(affine_FA) inv_affine_T1 = np.linalg.inv(affine_T1) inv_affine = np.linalg.inv(affine.affine) # transforming streamlines to FA space new_streams_FA = streamline.transform_streamlines(streams, inv_affine_FA) new_streams_FA_array = np.asarray(new_streams_FA) T1_to_FA = np.dot(inv_affine_FA, np.dot(affine.affine, affine_T1)) FA_to_T1 = np.linalg.inv(T1_to_FA) # transforming streamlines from FA to T1 space new_streams_T1 = streamline.transform_streamlines(new_streams_FA, FA_to_T1) global new_streams_T1_array new_streams_T1_array = np.asarray(new_streams_T1) # calculating amline derivatives along the streamlines to get the local orientation of the streamlines global streams_array_derivative streams_array_derivative = copy.deepcopy(new_streams_T1_array) print('calculating amline derivatives') for stream in range(len(new_streams_T1_array)): my_steam = new_streams_T1_array[stream] for t in range(len(my_steam[:, 0])): streams_array_derivative[stream][t, 0] = my_deriv(t, my_steam[:, 0]) streams_array_derivative[stream][t, 1] = my_deriv(t, my_steam[:, 1]) streams_array_derivative[stream][t, 2] = my_deriv(t, my_steam[:, 2]) deriv_norm = np.linalg.norm(streams_array_derivative[stream][t, :]) streams_array_derivative[stream][ t, :] = streams_array_derivative[stream][t, :] / deriv_norm # to create a torus representing a coil in an interactive window torus = vtk.vtkParametricTorus() torus.SetRingRadius(5) torus.SetCrossSectionRadius(2) torusSource = vtk.vtkParametricFunctionSource() torusSource.SetParametricFunction(torus) torusSource.SetScalarModeToPhase() torusMapper = vtk.vtkPolyDataMapper() torusMapper.SetInputConnection(torusSource.GetOutputPort()) torusMapper.SetScalarRange(0, 360) torusActor 
= vtk.vtkActor() torusActor.SetMapper(torusMapper) torus_pos_x = 100 torus_pos_y = 129 torus_pos_z = 211 torusActor.SetPosition(torus_pos_x, torus_pos_y, torus_pos_z) list_streams_T1 = list(new_streams_T1) # adding one fictive bundle of length 1 with coordinates [0,0,0] to avoid some bugs with actor.line during visualization list_streams_T1.append(np.array([0, 0, 0])) global bundle_native bundle_native = list_streams_T1 # generating a list of colors to visualize later the stimualtion effects effect_max = 0.100 effect_min = -0.100 global colors colors = [ np.random.rand(*current_streamline.shape) for current_streamline in bundle_native ] for my_streamline in range(len(bundle_native) - 1): my_stream = copy.deepcopy(bundle_native[my_streamline]) for point in range(len(my_stream)): colors[my_streamline][point] = vtkplotter.colors.colorMap( (effect_min + effect_max) / 2, name='jet', vmin=effect_min, vmax=effect_max) colors[my_streamline + 1] = vtkplotter.colors.colorMap(effect_min, name='jet', vmin=effect_min, vmax=effect_max) # Vizualization of fibers over T1 # i_coord = 0 # j_coord = 0 # k_coord = 0 # global number_of_stimulations number_of_stimulations = 0 actor_line_list = [] scene = window.Scene() scene.clear() scene.background((0.5, 0.5, 0.5)) world_coords = False shape = data_T1.shape lut = actor.colormap_lookup_table(scale_range=(effect_min, effect_max), hue_range=(0.4, 1.), saturation_range=(1, 1.)) # # the lines below is for a non-interactive demonstration run only. 
# # they should remain commented unless you set "interactive" to False # lut, colors = change_TMS_effects(torus_pos_x, torus_pos_y, torus_pos_z) # bar = actor.scalar_bar(lut) # bar.SetTitle("TMS effect") # bar.SetHeight(0.3) # bar.SetWidth(0.10) # bar.SetPosition(0.85, 0.3) # scene.add(bar) actor_line_list.append( actor.line(bundle_native, colors, linewidth=5, fake_tube=True, lookup_colormap=lut)) if not world_coords: image_actor_z = actor.slicer(data_T1, identity) else: image_actor_z = actor.slicer(data_T1, identity) slicer_opacity = 0.6 image_actor_z.opacity(slicer_opacity) image_actor_x = image_actor_z.copy() x_midpoint = int(np.round(shape[0] / 2)) image_actor_x.display_extent(x_midpoint, x_midpoint, 0, shape[1] - 1, 0, shape[2] - 1) image_actor_y = image_actor_z.copy() y_midpoint = int(np.round(shape[1] / 2)) image_actor_y.display_extent(0, shape[0] - 1, y_midpoint, y_midpoint, 0, shape[2] - 1) """ Connect the actors with the scene. """ scene.add(actor_line_list[0]) scene.add(image_actor_z) scene.add(image_actor_x) scene.add(image_actor_y) show_m = window.ShowManager(scene, size=(1200, 900)) show_m.initialize() """ Create sliders to move the slices and change their opacity. """ line_slider_z = ui.LineSlider2D(min_value=0, max_value=shape[2] - 1, initial_value=shape[2] / 2, text_template="{value:.0f}", length=140) line_slider_x = ui.LineSlider2D(min_value=0, max_value=shape[0] - 1, initial_value=shape[0] / 2, text_template="{value:.0f}", length=140) line_slider_y = ui.LineSlider2D(min_value=0, max_value=shape[1] - 1, initial_value=shape[1] / 2, text_template="{value:.0f}", length=140) opacity_slider = ui.LineSlider2D(min_value=0.0, max_value=1.0, initial_value=slicer_opacity, length=140) """ Сallbacks for the sliders. 
""" def change_slice_z(slider): z = int(np.round(slider.value)) image_actor_z.display_extent(0, shape[0] - 1, 0, shape[1] - 1, z, z) def change_slice_x(slider): x = int(np.round(slider.value)) image_actor_x.display_extent(x, x, 0, shape[1] - 1, 0, shape[2] - 1) def change_slice_y(slider): y = int(np.round(slider.value)) image_actor_y.display_extent(0, shape[0] - 1, y, y, 0, shape[2] - 1) def change_opacity(slider): slicer_opacity = slider.value image_actor_z.opacity(slicer_opacity) image_actor_x.opacity(slicer_opacity) image_actor_y.opacity(slicer_opacity) line_slider_z.on_change = change_slice_z line_slider_x.on_change = change_slice_x line_slider_y.on_change = change_slice_y opacity_slider.on_change = change_opacity """ Сreate text labels to identify the sliders. """ def build_label(text): label = ui.TextBlock2D() label.message = text label.font_size = 18 label.font_family = 'Arial' label.justification = 'left' label.bold = False label.italic = False label.shadow = False label.background = (0, 0, 0) label.color = (1, 1, 1) return label line_slider_label_z = build_label(text="Z Slice") line_slider_label_x = build_label(text="X Slice") line_slider_label_y = build_label(text="Y Slice") opacity_slider_label = build_label(text="Opacity") """ Create a ``panel`` to contain the sliders and labels. """ panel = ui.Panel2D(size=(300, 200), color=(1, 1, 1), opacity=0.1, align="right") panel.center = (1030, 120) panel.add_element(line_slider_label_x, (0.1, 0.75)) panel.add_element(line_slider_x, (0.38, 0.75)) panel.add_element(line_slider_label_y, (0.1, 0.55)) panel.add_element(line_slider_y, (0.38, 0.55)) panel.add_element(line_slider_label_z, (0.1, 0.35)) panel.add_element(line_slider_z, (0.38, 0.35)) panel.add_element(opacity_slider_label, (0.1, 0.15)) panel.add_element(opacity_slider, (0.38, 0.15)) scene.add(panel) """ Create a ``panel`` to show the value of a picked voxel. 
""" label_position = ui.TextBlock2D(text='Position:') label_value = ui.TextBlock2D(text='Value:') result_position = ui.TextBlock2D(text='') result_value = ui.TextBlock2D(text='') text2 = ui.TextBlock2D(text='Calculate') panel_picking = ui.Panel2D(size=(250, 125), color=(1, 1, 1), opacity=0.1, align="left") panel_picking.center = (200, 120) panel_picking.add_element(label_position, (0.1, 0.75)) panel_picking.add_element(label_value, (0.1, 0.45)) panel_picking.add_element(result_position, (0.45, 0.75)) panel_picking.add_element(result_value, (0.45, 0.45)) panel_picking.add_element(text2, (0.1, 0.15)) icon_files = [] icon_files.append(('left', read_viz_icons(fname='circle-left.png'))) button_example = ui.Button2D(icon_fnames=icon_files, size=(100, 30)) panel_picking.add_element(button_example, (0.5, 0.1)) def change_text_callback(i_ren, obj, button): text2.message = str(i_coord) + ' ' + str(j_coord) + ' ' + str(k_coord) torusActor.SetPosition(i_coord, j_coord, k_coord) print(i_coord, j_coord, k_coord) lut, colors = change_TMS_effects(i_coord, j_coord, k_coord) scene.rm(actor_line_list[0]) actor_line_list.append( actor.line(bundle_native, colors, linewidth=5, fake_tube=True, lookup_colormap=lut)) scene.add(actor_line_list[1]) nonlocal number_of_stimulations global bar if number_of_stimulations > 0: scene.rm(bar) else: number_of_stimulations = number_of_stimulations + 1 bar = actor.scalar_bar(lut) bar.SetTitle("TMS effect") bar.SetHeight(0.3) bar.SetWidth(0.10) # the width is set first bar.SetPosition(0.85, 0.3) scene.add(bar) actor_line_list.pop(0) i_ren.force_render() button_example.on_left_mouse_button_clicked = change_text_callback scene.add(panel_picking) scene.add(torusActor) def left_click_callback(obj, ev): """Get the value of the clicked voxel and show it in the panel.""" event_pos = show_m.iren.GetEventPosition() obj.picker.Pick(event_pos[0], event_pos[1], 0, scene) global i_coord, j_coord, k_coord i_coord, j_coord, k_coord = obj.picker.GetPointIJK() 
print(i_coord, j_coord, k_coord) result_position.message = '({}, {}, {})'.format( str(i_coord), str(j_coord), str(k_coord)) result_value.message = '%.8f' % data_T1[i_coord, j_coord, k_coord] torusActor.SetPosition(i_coord, j_coord, k_coord) image_actor_z.AddObserver('LeftButtonPressEvent', left_click_callback, 1.0) global size size = scene.GetSize() def win_callback(obj, event): global size if size != obj.GetSize(): size_old = size size = obj.GetSize() size_change = [size[0] - size_old[0], 0] panel.re_align(size_change) show_m.initialize() """ Set the following variable to ``True`` to interact with the datasets in 3D. """ interactive = True scene.zoom(2.0) scene.reset_clipping_range() scene.set_camera(position=(-642.07, 495.40, 148.49), focal_point=(127.50, 127.50, 127.50), view_up=(0.02, -0.01, 1.00)) if interactive: show_m.add_window_callback(win_callback) show_m.render() show_m.start() else: window.record(scene, out_path=out_dir + '/bundles_and_effects.png', size=(1200, 900), reset_camera=True)
def plot_an_odf_slice(odf_4d, full_sphere, background_data, tile_size,
                      filename, centroid, axis, camera_distance,
                      subtract_iso, mask_image):
    """Render a single ODF slice over its background image and save a png.

    Parameters
    ----------
    odf_4d : 4d array
        ODF amplitudes, one sphere sampling per voxel.
    full_sphere :
        Sphere object matching the *mirrored* ODF samples.
    background_data : 3d array
        Anatomical image shown behind the glyphs.
    tile_size : int
        Output image is ``tile_size`` x ``tile_size`` pixels.
    filename : str
        Path of the png written by ``window.record``.
    centroid : array
        Focal coordinate; mutated in place (the sliced axis is zeroed).
    axis : int
        Which spatial axis (0, 1 or 2) to slice.
    camera_distance : float
        Camera offset along ``axis`` from the centroid.
    subtract_iso : bool
        If True, subtract each voxel's minimum (isotropic) amplitude.
    mask_image :
        NOTE(review): accepted but never used in this body — the ODF actor
        is masked with the background slice instead; confirm with callers.
    """
    from fury import actor, window

    # Camera up-vector for each sliced axis.
    view_up = [(0., 0., 1.), (0., 0., 1.), (0., -1., 0.)]

    # Collapse the centroid onto a single slice along ``axis``.
    slicenum = int(np.round(centroid)[axis])
    centroid[axis] = 0

    # Place the camera ``camera_distance`` away along the sliced axis.
    position = centroid.copy()
    position[axis] = position[axis] + camera_distance

    # Roll if viewing an axial slice
    roll = 3 if axis == 2 else 0
    position[1] = position[1] - roll

    # The displayed volume is one voxel thick along ``axis``.
    new_shape = list(odf_4d.shape)
    new_shape[axis] = 1
    image_shape = new_shape[:3]

    # Build an index tuple instead of branching on the axis.
    selector = [slice(None)] * 3
    selector[axis] = slicenum
    spatial_sel = tuple(selector)
    odf_slice = odf_4d[spatial_sel + (slice(None),)].reshape(new_shape)
    image_slice = background_data[spatial_sel].reshape(image_shape)

    # Tile to get the whole ODF (mirror the half-sphere samples).
    odf_slice = np.tile(odf_slice, (1, 1, 1, 2))
    if subtract_iso:
        odf_slice = odf_slice - odf_slice.min(3, keepdims=True)

    # Make graphics objects.
    odf_actor = actor.odf_slicer(odf_slice, sphere=full_sphere,
                                 colormap=None, scale=0.6,
                                 mask=image_slice)
    image_actor = actor.slicer(image_slice, opacity=0.6,
                               interpolation='nearest')
    image_size = (tile_size, tile_size)

    scene = window.Scene()
    scene.add(image_actor)
    scene.add(odf_actor)

    # Show the full (single-voxel-thick) extent of both actors.
    extent = (0, new_shape[0] - 1,
              0, new_shape[1] - 1,
              0, new_shape[2] - 1)
    odf_actor.display_extent(*extent)
    image_actor.display_extent(*extent)

    scene.set_camera(focal_point=tuple(centroid),
                     position=tuple(position),
                     view_up=view_up[axis])
    window.record(scene, out_path=filename, reset_camera=False,
                  size=image_size)
    scene.clear()
def plot_peak_slice(odf_4d, sphere, background_data, out_file, axis,
                    slicenum, mask_data, tile_size=1200,
                    normalize_peaks=True):
    """Extract peak directions from one ODF slice and save a rendering.

    Parameters
    ----------
    odf_4d : 4d array
        ODF amplitudes, one sphere sampling per voxel.
    sphere :
        Sphere object used by ``peaks_from_odfs``.
    background_data : 3d array
        Anatomical image shown behind the peaks.
    out_file : str
        Path of the png written by ``window.record``.
    axis : int
        Which spatial axis (0, 1 or 2) to slice.
    slicenum : int
        Index of the slice along ``axis``.
    mask_data : 3d array
        Peaks are only computed where this mask is nonzero.
    tile_size : int
        Output image is ``tile_size`` x ``tile_size`` pixels.
    normalize_peaks : bool
        If True, rescale peak magnitudes so the largest equals pi.
    """
    from fury import actor, window

    # Camera up-vector for each sliced axis.
    view_up = [(0., 0., 1.), (0., 0., 1.), (0., -1., 0.)]

    # Reduce everything to a single-voxel-thick slab to save memory.
    new_shape = list(odf_4d.shape)
    new_shape[axis] = 1
    image_shape = new_shape[:3]
    midpoint = (new_shape[0] / 2., new_shape[1] / 2., new_shape[2] / 2.)

    # Build an index tuple instead of branching on the axis.
    selector = [slice(None)] * 3
    selector[axis] = slicenum
    spatial_sel = tuple(selector)
    odf_slice = odf_4d[spatial_sel + (slice(None),)].reshape(new_shape)
    image_slice = background_data[spatial_sel].reshape(image_shape)
    mask_slice = mask_data[spatial_sel].reshape(image_shape)

    # Camera distance scales with the largest in-plane half-extent.
    in_plane = [midpoint[dim] for dim in range(3) if dim != axis]
    camera_dist = max(in_plane) * np.pi

    position = list(midpoint)
    position[axis] += camera_dist

    # Find the actual peaks.
    peak_dirs, peak_values = peaks_from_odfs(
        odf_slice, sphere,
        relative_peak_threshold=.1,
        min_separation_angle=15,
        mask=mask_slice,
        normalize_peaks=normalize_peaks,
        npeaks=3)
    if normalize_peaks:
        peak_values = peak_values / peak_values.max() * np.pi

    peak_actor = actor.peak_slicer(peak_dirs, peak_values, colors=None)
    image_actor = actor.slicer(image_slice, opacity=0.6,
                               interpolation='nearest')
    image_size = (tile_size, tile_size)

    scene = window.Scene()
    scene.add(image_actor)
    scene.add(peak_actor)

    # Show the full (single-voxel-thick) extent of both actors.
    extent = (0, new_shape[0] - 1,
              0, new_shape[1] - 1,
              0, new_shape[2] - 1)
    peak_actor.display_extent(*extent)
    image_actor.display_extent(*extent)

    scene.set_camera(focal_point=tuple(midpoint),
                     position=tuple(position),
                     view_up=view_up[axis])
    window.record(scene, out_path=out_file, reset_camera=False,
                  size=image_size)
    scene.clear()
-1, forceObj=[-500, 0, 0],
                     posObj=pos, flags=p.WORLD_FRAME)
        # One-shot impulse: clear the flag so the force is applied only once.
        apply_force = False

    # NOTE(review): getLinkState(...)[4] — presumably the last link's world
    # frame position (pybullet API); verify against the pybullet docs.
    pos = p.getLinkState(rope, p.getNumJoints(rope) - 1)[4]
    ball_actor.SetPosition(*pos)
    # Keep the FURY actors in sync with the physics bodies.
    sync_chain(rope_actor, rope)
    utils.update_actor(brick_actor)
    utils.update_actor(rope_actor)

    # Simulate a step.
    p.stepSimulation()

    # Stop the animation after a fixed number of frames.
    if cnt == 130:
        showm.exit()


# Add the timer callback to showmanager.
# Increasing the duration value will slow down the simulation.
showm.add_timer_callback(True, 1, timer_callback)

interactive = False

# start simulation
if interactive:
    showm.start()

window.record(scene, size=(900, 768), out_path="viz_wrecking_ball.png")
showm.initialize()

frame_counter = itertools.count()


# After a number of frames the no-depth-test effect is removed from the
# actor and additive blending is applied in its place.
def timer_callback(_obj, _event):
    frame = next(frame_counter)
    showm.render()
    # Rotate the scene slowly so the result of each specific OpenGL state
    # is easy to see.
    showm.scene.azimuth(1)
    if frame == 400:
        remove_observer_from_actor(actor_no_depth_test, id_observer)
        shader_apply_effects(showm.window, actor_no_depth_test,
                             effects=window.gl_set_additive_blending)
    if frame == 1000:
        showm.exit()


interactive = False
showm.add_timer_callback(interactive, 5, timer_callback)
if interactive:
    showm.start()

window.record(scene, out_path='viz_fine_tuning_gl_context.png',
              size=(600, 600))
###############################################################################
# The final step! Visualize the result of our creation. The camera is moved
# a little farther back so the whole network fits in view; increase the
# ``max_iterations`` parameter of the timer callback to let the animation
# run for more time.

showm = window.ShowManager(scene, reset_camera=False, size=(900, 768),
                           order_transparent=True, multi_samples=8)
showm.initialize()
scene.set_camera(position=(0, 0, -300))

layout_timer = new_layout_timer(
    showm, edges, vertices_count,
    max_iterations=200,
    vertex_initial_positions=positions)

# Fire the layout timer every 16 milliseconds (~60 fps).
showm.add_timer_callback(True, 16, layout_timer)
showm.start()

window.record(showm.scene, size=(900, 768),
              out_path="viz_animated_networks.png")