import os

import numpy as np

from dipy.direction.peaks import (PeaksAndMetrics,
                                  reshape_peaks_for_visualization)
from dipy.core.sphere import Sphere
from dipy.io.image import save_nifti
from distutils.version import LooseVersion

# Conditional import machinery for pytables
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests, if we don't have pytables
tables, have_tables, _ = optional_package('tables',
                                          'PyTables is not installed')

# Useful variable for backward compatibility.
TABLES_LESS_3_0 = LooseVersion(tables.__version__) < "3.0" \
    if have_tables else False


def _safe_save(f, group, array, name):
    """ Safe saving of arrays with specific names

    Parameters
    ----------
    f : HDF5 file handle
    group : HDF5 group
    array : array
    name : string
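# --- Hedged usage sketch (not part of the module above): how the
# ``optional_package`` triple behaves. When the package is missing, the
# first element is a TripWire placeholder that raises only once an
# attribute is actually accessed; the package name below is deliberately
# fake.
from dipy.utils.optpkg import optional_package
from dipy.utils.tripwire import TripWireError

pkg, have_pkg, _ = optional_package('no_such_package_xyz')
assert have_pkg is False
try:
    pkg.anything  # the first real use trips the wire
except TripWireError as err:
    print("optional dependency missing:", err)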
from scipy.ndimage.morphology import binary_dilation

from dipy.utils.optpkg import optional_package
from dipy.io import read_bvals_bvecs
from dipy.io.image import load_nifti, save_nifti
from dipy.core.gradients import gradient_table
from dipy.segment.mask import median_otsu
from dipy.reconst.dti import TensorModel
from dipy.segment.mask import segment_from_cfa
from dipy.segment.mask import bounding_box
from dipy.workflows.workflow import Workflow
from dipy.viz.regtools import simple_plot
from dipy.stats.analysis import bundle_analysis

pd, have_pd, _ = optional_package("pandas")
smf, have_smf, _ = optional_package("statsmodels")
tables, have_tables, _ = optional_package("tables")

if have_pd:
    import pandas as pd

if have_smf:
    import statsmodels.formula.api as smf

if have_tables:
    import tables


class SNRinCCFlow(Workflow):
----------
.. [1] http://www.hdfgroup.org/HDF5/doc/H5.intro.html

"""
import numpy as np
from distutils.version import LooseVersion

# Conditional testing machinery for pytables
from dipy.testing import doctest_skip_parser

# Conditional import machinery for pytables
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests, if we don't have pytables
tables, have_tables, _ = optional_package('tables')

# Useful variable for backward compatibility.
TABLES_LESS_3_0 = LooseVersion(tables.__version__) < "3.0" \
    if have_tables else False

# Make sure not to carry across setup module from * import
__all__ = ['Dpy']


class Dpy(object):

    @doctest_skip_parser
    def __init__(self, fname, mode='r', compression=0):
        """ Advanced storage system for tractography based on HDF5

        Parameters
        ----------
http://www.vtk.org/Wiki/VTK/Tutorials/External_Tutorials
"""
from __future__ import division, print_function, absolute_import

from warnings import warn

from dipy.utils.six.moves import xrange

import numpy as np

from dipy.core.ndindex import ndindex

# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
colors, have_vtk_colors, _ = optional_package('vtk.util.colors')
cm, have_matplotlib, _ = optional_package('matplotlib.cm')

if have_matplotlib:
    get_cmap = cm.get_cmap
else:
    from dipy.data import get_cmap

from dipy.viz.colormap import create_colormap

# a track buffer used only with picking tracks
track_buffer = []
# indices buffer for the tracks
ind_buffer = []
It is built using the pytables tools which in turn implement
key features of the HDF5 (hierarchical data format) API [1]_.

References
----------
.. [1] http://www.hdfgroup.org/HDF5/doc/H5.intro.html

'''
import numpy as np

# Conditional import machinery for pytables
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests, if we don't have pytables
tables, have_tables, setup_module = optional_package('tables')

# Make sure not to carry across setup module from * import
__all__ = ['Dpy']


class Dpy(object):

    def __init__(self, fname, mode='r', compression=0):
        ''' Advanced storage system for tractography based on HDF5

        Parameters
        ----------
        fname : str
            full filename
        mode : 'r' read
               'w' write
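# --- Hedged usage sketch for ``Dpy`` (assumes pytables is available;
# method names follow the class docstring and may differ across dipy
# versions) ---
import numpy as np
from dipy.io.dpy import Dpy

dpw = Dpy('tracks.dpy', mode='w')
dpw.write_track(np.random.rand(5, 3).astype(np.float32))
dpw.close()

dpr = Dpy('tracks.dpy', mode='r')
first_track = dpr.read_track()
dpr.close()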
import os

from distutils.version import LooseVersion

from dipy.viz import fvtk
from dipy import data
import numpy.testing as npt
from dipy.testing.decorators import xvfb_it
from dipy.utils.optpkg import optional_package

use_xvfb = os.environ.get('TEST_WITH_XVFB', False)
if use_xvfb == 'skip':
    skip_it = True
else:
    skip_it = False

cm, have_matplotlib, _ = optional_package('matplotlib.cm')

if have_matplotlib:
    import matplotlib
    mpl_version = LooseVersion(matplotlib.__version__)


@npt.dec.skipif(not fvtk.have_vtk or not fvtk.have_vtk_colors or skip_it)
@xvfb_it
def test_fvtk_functions():
    # These tests will fail if any of the given actors changed inputs or do
    # not exist

    # Create a renderer
    r = fvtk.ren()
import numpy as np

from dipy.data import read_viz_icons

# Conditional import machinery for vtk.
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests if we don't have vtk.
from dipy.viz import ui, window

vtk, have_vtk, setup_module = optional_package('vtk')

if have_vtk:
    vtkInteractorStyleUser = vtk.vtkInteractorStyleUser
    version = vtk.vtkVersion.GetVTKSourceVersion().split(' ')[-1]
    major_version = vtk.vtkVersion.GetVTKMajorVersion()
else:
    vtkInteractorStyleUser = object

numpy_support, have_ns, _ = optional_package('vtk.util.numpy_support')


# Cube Actors
def cube_maker(color=None, size=(0.2, 0.2, 0.2), center=None):
    cube = vtk.vtkCubeSource()
    cube.SetXLength(size[0])
    cube.SetYLength(size[1])
    cube.SetZLength(size[2])
    if center is not None:
        cube.SetCenter(*center)
    cube_mapper = vtk.vtkPolyDataMapper()
import warnings

try:
    from numpy import nanmean
except ImportError:
    from scipy.stats import nanmean

from dipy.utils.optpkg import optional_package
from dipy.utils.multiproc import determine_num_processes

import dipy.core.gradients as grad
import dipy.core.optimize as opt
import dipy.sims.voxel as sims
import dipy.data as dpd
from dipy.reconst.base import ReconstModel, ReconstFit
from dipy.reconst.cache import Cache
from dipy.core.onetime import auto_attr

joblib, has_joblib, _ = optional_package('joblib')
sklearn, has_sklearn, _ = optional_package('sklearn')
lm, _, _ = optional_package('sklearn.linear_model')

# If sklearn is unavailable, we can fall back on nnls (but we also warn the
# user that we are about to do that):
if not has_sklearn:
    w = sklearn._msg + "\nAlternatively, you can use 'nnls' method to fit"
    w += " the SparseFascicleModel"
    warnings.warn(w)


# Isotropic signal models: these are models of the part of the signal that
# changes with b-value, but does not change with direction. This collection
# is extensible, by inheriting from IsotropicModel/IsotropicFit below:
http://www.vtk.org/Wiki/VTK/Tutorials/External_Tutorials
'''
from __future__ import division, print_function, absolute_import

from warnings import warn

from dipy.utils.six.moves import xrange

import numpy as np

from dipy.core.ndindex import ndindex

# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
colors, have_vtk_colors, _ = optional_package('vtk.util.colors')
cm, have_matplotlib, _ = optional_package('matplotlib.cm')

if have_matplotlib:
    get_cmap = cm.get_cmap
else:
    from dipy.data import get_cmap

# a track buffer used only with picking tracks
track_buffer = []
# indices buffer for the tracks
ind_buffer = []
# temporary renderer used only with picking tracks
tmp_ren = None
import numpy as np

from dipy.viz import regtools
import numpy.testing as npt
from dipy.align.metrics import SSDMetric
from dipy.align.imwarp import SymmetricDiffeomorphicRegistration

# Conditional import machinery for matplotlib
from dipy.utils.optpkg import optional_package

_, have_matplotlib, _ = optional_package('matplotlib')


@npt.dec.skipif(not have_matplotlib)
def test_plot_2d_diffeomorphic_map():
    # Test the regtools plotting interface (lightly).
    mv_shape = (11, 12)
    moving = np.random.rand(*mv_shape)
    st_shape = (13, 14)
    static = np.random.rand(*st_shape)
    dim = static.ndim
    metric = SSDMetric(dim)
    level_iters = [200, 100, 50, 25]
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters,
                                             inv_iter=50)
    mapping = sdr.optimize(static, moving)

    # Smoke testing of plots
    ff = regtools.plot_2d_diffeomorphic_map(mapping, 10)

    # Default shape is static shape, moving shape
    npt.assert_equal(ff[0].shape, st_shape)
    npt.assert_equal(ff[1].shape, mv_shape)
from warnings import warn
from math import factorial

import numpy as np
from scipy.special import genlaguerre, gamma, hyp2f1

from dipy.reconst.cache import Cache
from dipy.reconst.multi_voxel import multi_voxel_fit
from dipy.reconst.shm import real_sph_harm
from dipy.core.geometry import cart2sphere

from dipy.utils.optpkg import optional_package

cvxopt, have_cvxopt, _ = optional_package("cvxopt")

if have_cvxopt:
    import cvxopt.solvers


class ShoreModel(Cache):
    r"""Simple Harmonic Oscillator based Reconstruction and Estimation
    (SHORE) [1]_ of the diffusion signal.

    The main idea is to model the diffusion signal as a linear combination
    of continuous functions $\phi_i$,

    .. math::
        :nowrap:

        \begin{equation}
            S(\mathbf{q}) = \sum_{i=0}^{I} c_{i} \phi_{i}(\mathbf{q}).
        \end{equation}
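# --- Hedged usage sketch for ``ShoreModel`` (parameter values follow the
# dipy SHORE example; ``gtab``, ``data`` and ``sphere`` are assumed to
# exist, so the calls are left commented) ---
# asm = ShoreModel(gtab, radial_order=6, zeta=700,
#                  lambdaN=1e-8, lambdaL=1e-8)
# asmfit = asm.fit(data)
# odf = asmfit.odf(sphere)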
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import vtk

from dipy.viz import utils
from dipy.utils.optpkg import optional_package

numpy_support, have_ns, _ = optional_package('vtk.util.numpy_support')


def label(text='Origin', pos=(0, 0, 0), scale=(0.2, 0.2, 0.2),
          color=(1, 1, 1)):
    atext = vtk.vtkVectorText()
    atext.SetText(text)

    textm = vtk.vtkPolyDataMapper()
    textm.SetInputConnection(atext.GetOutputPort())

    texta = vtk.vtkFollower()
    texta.SetMapper(textm)
    texta.SetScale(scale)

    texta.GetProperty().SetColor(color)
    texta.SetPosition(pos)

    return texta
# Init file for visualization package
from __future__ import division, print_function, absolute_import

from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests if we don't have fury
fury, have_fury, _ = optional_package('fury')

if have_fury:
    from fury import actor, window, colormap, interactor, ui, utils
    from fury.window import vtk
    from fury.data import (fetch_viz_icons, read_viz_icons,
                           DATA_DIR as FURY_DATA_DIR)

# We make the visualization requirements optional imports:
_, has_mpl, _ = optional_package('matplotlib',
                                 "You do not have Matplotlib installed. Some"
                                 " visualization functions might not work"
                                 " for you")

if has_mpl:
    from . import projections
import sys
import importlib
import warnings

import pytest

from dipy.utils.optpkg import optional_package

fury, has_fury, _ = optional_package('fury')


@pytest.mark.skipif(has_fury, reason="Skipped because Fury is installed")
def test_viz_import_warning():
    with warnings.catch_warnings(record=True) as w:
        module_path = 'dipy.viz'
        if module_path in sys.modules:
            importlib.reload(sys.modules[module_path])
        else:
            importlib.import_module(module_path)
    assert len(w) == 1
import tempfile

from dipy.utils.optpkg import optional_package
from sklearn.impute import SimpleImputer
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.utils.validation import check_X_y, check_is_fitted

keras_msg = (
    "To use afqinsight's convolutional neural nets for tractometry data, "
    "you will need to have tensorflow and kerastuner installed. You can do "
    "this by installing afqinsight with `pip install afqinsight[tf]`, or by "
    "separately installing these packages with "
    "`pip install tensorflow keras-tuner`."
)

kt, _, _ = optional_package("keras_tuner", keras_msg)
tf, has_tf, _ = optional_package("tensorflow", keras_msg)

if has_tf:
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import (Dense, Conv1D, Flatten, MaxPool1D,
                                         Dropout)
    from tensorflow.keras.callbacks import ModelCheckpoint


def build_model(hp, conv_layers, input_shape):
    """Build a keras model.

    Uses keras tuner to build the model - it can control the number of
    layers, the number of filters in each layer, kernel size,
    regularization, etc.

    Parameters
""" Utility functions for file formats """ import logging import numbers import os from dipy.utils.optpkg import optional_package import dipy import nibabel as nib from nibabel.streamlines import detect_format from nibabel import Nifti1Image import numpy as np pd, have_pd, _ = optional_package("pandas") if have_pd: import pandas as pd def nifti1_symmat(image_data, *args, **kwargs): """Returns a Nifti1Image with a symmetric matrix intent Parameters ---------- image_data : array-like should have lower triangular elements of a symmetric matrix along the last dimension all other arguments and keywords are passed to Nifti1Image Returns ------- image : Nifti1Image 5d, extra dimensions addes before the last. Has symmetric matrix intent
import itertools

import numpy as np

from dipy.utils.optpkg import optional_package

fury, have_fury, setup_module = optional_package('fury')

if have_fury:
    from dipy.viz import actor, ui, colormap
    from dipy.viz.gmem import HORIMEM


def build_label(text, font_size=18, bold=False):
    """ Simple utility function to build labels

    Parameters
    ----------
    text : str
    font_size : int
    bold : bool

    Returns
    -------
    label : TextBlock2D
    """
    label = ui.TextBlock2D()
    label.message = text
    label.font_size = font_size
    label.font_family = 'Arial'
    label.justification = 'left'
    label.bold = bold
from __future__ import division, print_function, absolute_import

from dipy.viz.utils import set_input

# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
colors, have_vtk_colors, _ = optional_package('vtk.util.colors')
ns, have_numpy_support, _ = optional_package('vtk.util.numpy_support')

if have_vtk:
    version = vtk.vtkVersion.GetVTKSourceVersion().split(' ')[-1]
    major_version = vtk.vtkVersion.GetVTKMajorVersion()


def load_polydata(file_name):
    """ Load a vtk polydata from a file in a supported format

    Supported file formats are OBJ, VTK, FIB, PLY, STL and XML

    Parameters
    ----------
    file_name : string

    Returns
    -------
    output : vtkPolyData
    """
    # get file extension (type), lower case
import os

import numpy as np
import numpy.testing as npt
import nibabel as nib
from numpy.testing import assert_equal, run_module_suite

from nibabel.tmpdirs import TemporaryDirectory

from dipy.data import get_fnames
from dipy.io.streamline import save_trk
from dipy.io.image import save_nifti
from dipy.tracking.streamline import Streamlines
from dipy.utils.optpkg import optional_package
from dipy.stats.analysis import bundle_analysis, gaussian_weights, afq_profile
from dipy.testing import assert_true

_, have_pd, _ = optional_package("pandas")
_, have_smf, _ = optional_package("statsmodels")
_, have_tables, _ = optional_package("tables")


@npt.dec.skipif(not have_pd or not have_smf or not have_tables)
def test_ba():
    with TemporaryDirectory() as dirpath:
        streams, hdr = nib.trackvis.read(get_fnames('fornix'))
        fornix = [s[0] for s in streams]
        f = Streamlines(fornix)
        mb = os.path.join(dirpath, "model_bundles")
from warnings import warn

import numpy as np

# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
cm, have_matplotlib, _ = optional_package('matplotlib.cm')

if have_matplotlib:
    get_cmap = cm.get_cmap
else:
    from dipy.data import get_cmap


def colormap_lookup_table(scale_range=(0, 1), hue_range=(0.8, 0),
                          saturation_range=(1, 1), value_range=(0.8, 0.8)):
    """ Lookup table for the colormap

    Parameters
    ----------
    scale_range : tuple
        It can be anything e.g. (0, 1) or (0, 255). Usually it is the
        minimum and maximum value of your data. Default is (0, 1).
    hue_range : tuple of floats
        HSV values (min 0 and max 1). Default is (0.8, 0).
    saturation_range : tuple of floats
        HSV values (min 0 and max 1). Default is (1, 1).
import os
from os.path import join as pjoin

import numpy.testing as npt

from dipy.data import read_viz_icons, fetch_viz_icons
from dipy.viz import ui
from dipy.viz import window
from dipy.data import DATA_DIR
from dipy.testing.decorators import xvfb_it
from dipy.viz.ui import UI

# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')

use_xvfb = os.environ.get('TEST_WITH_XVFB', False)
if use_xvfb == 'skip':
    skip_it = True
else:
    skip_it = False


@npt.dec.skipif(not have_vtk or skip_it)
@xvfb_it
def test_ui(recording=False):
    print("Using VTK {}".format(vtk.vtkVersion.GetVTKVersion()))
    filename = "test_ui.log.gz"
    recording_filename = pjoin(DATA_DIR, filename)
""" import warnings import numpy as np from dipy.utils.optpkg import optional_package import dipy.core.geometry as geo import dipy.core.gradients as grad import dipy.core.optimize as opt import dipy.sims.voxel as sims import dipy.reconst.dti as dti import dipy.data as dpd from dipy.reconst.base import ReconstModel, ReconstFit from dipy.reconst.cache import Cache from dipy.core.onetime import auto_attr lm, has_sklearn, _ = optional_package('sklearn.linear_model') # If sklearn is unavailable, we can fall back on nnls (but we also warn the # user that we are about to do that): if not has_sklearn: w = "sklearn is not available, you can use 'nnls' method to fit" w += " the SparseFascicleModel" warnings.warn(w) def sfm_design_matrix(gtab, sphere, response, mode='signal'): """ Construct the SFM design matrix Parameters ----------
""" Class for profiling cython code """ import os import subprocess from dipy.utils.optpkg import optional_package cProfile, _, _ = optional_package('cProfile') pstats, _, _ = optional_package('pstats', 'pstats is not installed. It is part of the' 'python-profiler package in Debian/Ubuntu') class Profiler(): ''' Profile python/cython files or functions If you are profiling cython code you need to add # cython: profile=True on the top of your .pyx file and for the functions that you do not want to profile you can use this decorator in your cython files @cython.profile(False) Parameters ------------- caller : file or function call args : function arguments Attributes
import numpy as np

from dipy.utils.optpkg import optional_package

fury, have_fury, setup_module = optional_package('fury')

if have_fury:
    from dipy.viz import actor, ui


def build_label(text, font_size=18, bold=False):
    """ Simple utility function to build labels

    Parameters
    ----------
    text : str
    font_size : int
    bold : bool

    Returns
    -------
    label : TextBlock2D
    """
    label = ui.TextBlock2D()
    label.message = text
    label.font_size = font_size
    label.font_family = 'Arial'
    label.justification = 'left'
    label.bold = bold
    label.italic = False
    label.shadow = False
""" Class for profiling cython code """ import os import subprocess from dipy.utils.optpkg import optional_package cProfile, _, _ = optional_package('cProfile') pstats, _, _ = optional_package( 'pstats', 'pstats is not installed. It is part of the' 'python-profiler package in Debian/Ubuntu') class Profiler(): """ Profile python/cython files or functions If you are profiling cython code you need to add # cython: profile=True on the top of your .pyx file and for the functions that you do not want to profile you can use this decorator in your cython files @cython.profile(False) Parameters ---------- caller : file or function call args : function arguments Attributes
from __future__ import division, print_function, absolute_import

import numpy as np
from scipy.ndimage import map_coordinates
from dipy.viz.colormap import line_colors

# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package

# import vtk
# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
ns, have_numpy_support, _ = optional_package('vtk.util.numpy_support')


def set_input(vtk_object, inp):
    """ Generic input function which takes into account VTK 5 or 6

    Parameters
    ----------
    vtk_object : vtk object
    inp : vtkPolyData or vtkImageData or vtkAlgorithmOutput

    Returns
    -------
    vtk_object

    Notes
    -----
    This can be used in the following way::
import warnings

import numpy as np
import numpy.testing as nt
import pytest

from dipy.core.sphere import (Sphere, HemiSphere, unique_edges, unique_sets,
                              faces_from_sphere_vertices, disperse_charges,
                              disperse_charges_alt, _get_forces,
                              _get_forces_alt, unit_octahedron,
                              unit_icosahedron, hemi_icosahedron)
from dipy.core.geometry import cart2sphere, vector_norm
from dipy.core.sphere_stats import random_uniform_on_sphere
from dipy.utils.optpkg import optional_package

delaunay, have_delaunay, _ = optional_package('scipy.spatial.Delaunay')

if have_delaunay:
    from scipy.spatial import Delaunay

verts = unit_octahedron.vertices
edges = unit_octahedron.edges
oct_faces = unit_octahedron.faces
r, theta, phi = cart2sphere(*verts.T)


def test_sphere_construct_args():
    nt.assert_raises(ValueError, Sphere)
    nt.assert_raises(ValueError, Sphere, x=1, theta=1)
    nt.assert_raises(ValueError, Sphere, xyz=1, theta=1)
    nt.assert_raises(ValueError, Sphere, xyz=1, theta=1, phi=1)
import numpy as np

from dipy.utils.optpkg import optional_package

matplotlib, has_mpl, setup_module = optional_package("matplotlib")
plt, _, _ = optional_package("matplotlib.pyplot")


def _tile_plot(imgs, titles, **kwargs):
    """ Helper function
    """
    # Create a new figure and plot the three images
    fig, ax = plt.subplots(1, len(imgs))
    for ii, a in enumerate(ax):
        a.set_axis_off()
        a.imshow(imgs[ii], **kwargs)
        a.set_title(titles[ii])

    return fig


def simple_plot(file_name, title, x, y, xlabel, ylabel):
    """ Saves the simple plot with given x and y values

    Parameters
    ----------
    file_name : string
        file name for saving the plot
    title : string
        title of the plot
    x : integer list
        x-axis values to be plotted
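# --- Hedged usage sketch for ``simple_plot`` (writes a PNG to disk; runs
# only when matplotlib is available; the file name is illustrative) ---
if has_mpl:
    xs = list(range(10))
    ys = [x ** 2 for x in xs]
    simple_plot('quadratic.png', 'A simple plot', xs, ys, 'x', 'x squared')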
from glob import glob

from dipy.utils.optpkg import optional_package
from dipy.reconst.dti import TensorModel
from dipy.io.peaks import load_peaks
from dipy.io.stateful_tractogram import Space, StatefulTractogram
from dipy.io.streamline import load_tractogram, save_tractogram
from dipy.segment.mask import segment_from_cfa
from dipy.segment.mask import bounding_box
# from dipy.io.streamline import load_trk, save_trk
from dipy.tracking.streamline import transform_streamlines
from dipy.workflows.workflow import Workflow
from dipy.segment.bundles import bundle_shape_similarity
from dipy.stats.analysis import assignment_map
from dipy.stats.analysis import anatomical_measures
from dipy.stats.analysis import peak_values

pd, have_pd, _ = optional_package("pandas")
smf, have_smf, _ = optional_package("statsmodels")
tables, have_tables, _ = optional_package("tables")
matplt, have_matplotlib, _ = optional_package("matplotlib")

if have_pd:
    import pandas as pd

if have_smf:
    import statsmodels.formula.api as smf

if have_matplotlib:
    import matplotlib as matplt
    import matplotlib.pyplot as plt
from __future__ import division

import numpy as np
from scipy import special
from scipy.special import erf

from ..utils import utils
from ..core.constants import CONSTANTS
from ..core.modeling_framework import ModelProperties
from dipy.utils.optpkg import optional_package

numba, have_numba, _ = optional_package("numba")

DIFFUSIVITY_SCALING = 1e-9
DIAMETER_SCALING = 1e-6
A_SCALING = 1e-12

__all__ = [
    'C1Stick',
    'C2CylinderStejskalTannerApproximation',
    'C3CylinderCallaghanApproximation',
    'C4CylinderGaussianPhaseApproximation'
]


class C1Stick(ModelProperties):
    r""" The Stick model [1]_ - a cylinder with zero radius - typically used
    for intra-axonal diffusion.

    Parameters
""" Visualization tools for 2D projections of 3D functions on the sphere, such as ODFs. """ import numpy as np import scipy.interpolate as interp from dipy.utils.optpkg import optional_package import dipy.core.geometry as geo from dipy.testing import doctest_skip_parser matplotlib, has_mpl, setup_module = optional_package("matplotlib") plt, _, _ = optional_package("matplotlib.pyplot") tri, _, _ = optional_package("matplotlib.tri") bm, has_basemap, _ = optional_package("mpl_toolkits.basemap") @doctest_skip_parser def sph_project(vertices, val, ax=None, vmin=None, vmax=None, cmap=None, cbar=True, tri=False, boundary=False, **basemap_args): """Draw a signal on a 2D projection of the sphere. Parameters ---------- vertices : (N,3) ndarray unit vector points of the sphere val: (N) ndarray
import numpy as np

from dipy.core import geometry as geo
from dipy.core.gradients import GradientTable
from dipy.data import default_sphere
from dipy.reconst import shm
from dipy.reconst.multi_voxel import multi_voxel_fit
from dipy.utils.optpkg import optional_package

cvx, have_cvxpy, _ = optional_package("cvxpy")

SH_CONST = .5 / np.sqrt(np.pi)


def multi_tissue_basis(gtab, sh_order, iso_comp):
    """
    Builds a basis for the multi-shell multi-tissue CSD model.

    Parameters
    ----------
    gtab : GradientTable
    sh_order : int
    iso_comp : int
        Number of tissue compartments for running the MSMT-CSD. Minimum
        number of compartments required is 2.

    Returns
    -------
    B : ndarray
        Matrix of the spherical harmonics model used to fit the data
    m : int ``|m| <= n``
        The order of the harmonic.
""" Visualization tools for 2D projections of 3D functions on the sphere, such as ODFs. """ import numpy as np import scipy.interpolate as interp from dipy.utils.optpkg import optional_package import dipy.core.geometry as geo from dipy.testing.decorators import doctest_skip_parser matplotlib, has_mpl, setup_module = optional_package("matplotlib") plt, _, _ = optional_package("matplotlib.pyplot") tri, _, _ = optional_package("matplotlib.tri") bm, has_basemap, _ = optional_package("mpl_toolkits.basemap") @doctest_skip_parser def sph_project(vertices, val, ax=None, vmin=None, vmax=None, cmap=None, cbar=True, tri=False, boundary=False, **basemap_args): """Draw a signal on a 2D projection of the sphere. Parameters ---------- vertices : (N,3) ndarray unit vector points of the sphere val: (N) ndarray
from distutils.version import LooseVersion

from dipy.utils.optpkg import optional_package

tf, have_tf, _ = optional_package('tensorflow')

if have_tf:
    if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
        raise ImportError('Please upgrade to TensorFlow 2+')


class SingleLayerPerceptron(object):

    def __init__(self, input_shape=(28, 28),
                 num_hidden=128, act_hidden='relu',
                 dropout=0.2,
                 num_out=10, act_out='softmax',
                 optimizer='adam',
                 loss='sparse_categorical_crossentropy'):
        """ Single Layer Perceptron with Dropout.

        Parameters
        ----------
        input_shape : tuple
            Shape of data to be trained
        num_hidden : int
            Number of nodes in hidden layer
        act_hidden : string
            Activation function used in hidden layer
        dropout : float
            Dropout ratio
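# --- Hedged usage sketch for ``SingleLayerPerceptron`` (assumes the
# keras-style ``fit`` wrapper of dipy.nn; the random data is purely
# illustrative) ---
if have_tf:
    import numpy as np
    slp = SingleLayerPerceptron(input_shape=(28, 28))
    x_train = np.random.rand(64, 28, 28).astype('float32')
    y_train = np.random.randint(0, 10, size=64)
    slp.fit(x_train, y_train, epochs=1)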
from warnings import warn

import numpy as np

# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
cm, have_matplotlib, _ = optional_package('matplotlib.cm')

if have_matplotlib:
    get_cmap = cm.get_cmap
else:
    from dipy.data import get_cmap


def colormap_lookup_table(scale_range=(0, 1), hue_range=(0.8, 0),
                          saturation_range=(1, 1), value_range=(0.8, 0.8)):
    """ Lookup table for the colormap

    Parameters
    ----------
    scale_range : tuple
        It can be anything e.g. (0, 1) or (0, 255). Usually it is the
        minimum and maximum value of your data. Default is (0, 1).
    hue_range : tuple of floats
        HSV values (min 0 and max 1). Default is (0.8, 0).
    saturation_range : tuple of floats
        HSV values (min 0 and max 1). Default is (1, 1).
    value_range : tuple of floats
        HSV value (min 0 and max 1). Default is (0.8, 0.8).
import os

import pytest
import numpy as np
import numpy.testing as npt

from dipy.utils.optpkg import optional_package
from nibabel.tmpdirs import TemporaryDirectory

from dipy.data import get_fnames
from dipy.io.image import save_nifti, load_nifti, load_nifti_data
from dipy.testing import (assert_true, assert_false, assert_greater,
                          assert_less)
from dipy.workflows.denoise import (NLMeansFlow, LPCAFlow, MPPCAFlow,
                                    GibbsRingingFlow, Patch2SelfFlow)

sklearn, has_sklearn, _ = optional_package('sklearn')
needs_sklearn = pytest.mark.skipif(
    not has_sklearn, reason=sklearn._msg if not has_sklearn else "")


def test_nlmeans_flow():
    with TemporaryDirectory() as out_dir:
        data_path, _, _ = get_fnames()
        volume, affine = load_nifti(data_path)

        nlmeans_flow = NLMeansFlow()

        nlmeans_flow.run(data_path, out_dir=out_dir)
        assert_true(os.path.isfile(
            nlmeans_flow.last_generated_outputs['out_denoised']))

        nlmeans_flow._force_overwrite = True
from __future__ import division, print_function, absolute_import

import os

import numpy as np

from dipy.core.sphere import Sphere
from dipy.direction.peaks import PeaksAndMetrics
from dipy.data import get_sphere
from distutils.version import LooseVersion

# Conditional import machinery for pytables
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests, if we don't have pytables
tables, have_tables, _ = optional_package('tables')

# Useful variable for backward compatibility.
if have_tables:
    TABLES_LESS_3_0 = LooseVersion(tables.__version__) < "3.0"


def _safe_save(f, group, array, name):
    """ Safe saving of arrays with specific names

    Parameters
    ----------
    f : HDF5 file handle
    group : HDF5 group
    array : array
from scipy.ndimage.morphology import binary_dilation

from dipy.utils.optpkg import optional_package
from dipy.io import read_bvals_bvecs
from dipy.io.image import load_nifti, save_nifti
from dipy.core.gradients import gradient_table
from dipy.segment.mask import median_otsu
from dipy.reconst.dti import TensorModel
from dipy.segment.mask import segment_from_cfa
from dipy.segment.mask import bounding_box
from dipy.workflows.workflow import Workflow
from dipy.viz.regtools import simple_plot
from dipy.stats.analysis import bundle_analysis

pd, have_pd, _ = optional_package("pandas")
smf, have_smf, _ = optional_package("statsmodels.formula.api")
tables, have_tables, _ = optional_package("tables")

if have_pd:
    import pandas as pd

if have_smf:
    import statsmodels.formula.api as smf

if have_tables:
    import tables


class SNRinCCFlow(Workflow):
import os
from os.path import join as pjoin
from collections import defaultdict

import numpy as np
import numpy.testing as npt

from dipy.viz import actor, window, interactor
from dipy.viz import utils as vtk_utils
from dipy.data import DATA_DIR
from dipy.testing.decorators import xvfb_it

# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package("vtk")

use_xvfb = os.environ.get("TEST_WITH_XVFB", False)
if use_xvfb == "skip":
    skip_it = True
else:
    skip_it = False


@npt.dec.skipif(not have_vtk or not actor.have_vtk_colors or skip_it)
@xvfb_it
def test_custom_interactor_style_events(recording=False):
    print("Using VTK {}".format(vtk.vtkVersion.GetVTKVersion()))
    filename = "test_custom_interactor_style_events.log.gz"
    recording_filename = pjoin(DATA_DIR, filename)
    renderer = window.Renderer()
import time
from warnings import warn

import numpy as np

from dipy.utils.optpkg import optional_package
import dipy.core.optimize as opt

sklearn, has_sklearn, _ = optional_package('sklearn')
linear_model, _, _ = optional_package('sklearn.linear_model')

if not has_sklearn:
    warn(sklearn._msg)


def _vol_split(train, vol_idx):
    """ Split the 3D volumes into the train and test set.

    Parameters
    ----------
    train : ndarray
        Array of all 3D patches flattened out to be 2D.
    vol_idx : int
        The volume number that needs to be held out for training.

    Returns
    -------
    cur_x : 2D-array (nvolumes*patch_size) x (nvoxels)
        Array of patches corresponding to all the volumes except for the
        held-out volume.
    y : 1D-array
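# --- Hedged usage sketch: the public entry point built on these helpers
# is ``patch2self`` (signature follows dipy.denoise.patch2self; the
# synthetic data below is purely illustrative) ---
if has_sklearn:
    import numpy as np
    from dipy.denoise.patch2self import patch2self

    data = np.random.rand(12, 12, 12, 8).astype('float32')
    bvals = np.array([0, 1000, 1000, 1000, 2000, 2000, 2000, 2000])
    denoised = patch2self(data, bvals, model='ols')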
from __future__ import division, print_function, absolute_import

from dipy.viz.utils import set_input

# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package("vtk")
colors, have_vtk_colors, _ = optional_package("vtk.util.colors")
ns, have_numpy_support, _ = optional_package("vtk.util.numpy_support")

if have_vtk:
    version = vtk.vtkVersion.GetVTKSourceVersion().split(" ")[-1]
    major_version = vtk.vtkVersion.GetVTKMajorVersion()


def load_polydata(file_name):
    """ Load a vtk polydata from a file in a supported format

    Supported file formats are OBJ, VTK, FIB, PLY, STL and XML

    Parameters
    ----------
    file_name : string

    Returns
    -------
    output : vtkPolyData
    """
    # get file extension (type), lower case
# Init file for visualization package
from __future__ import division, print_function, absolute_import

import warnings

from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests if we don't have fury
fury, has_fury, _ = optional_package(
    'fury',
    "You do not have FURY installed. Some visualization functions "
    "might not work for you. For installation instructions, please visit: "
    "https://fury.gl/")

if has_fury:
    from fury import actor, window, colormap, interactor, ui, utils
    from fury.window import vtk
    from fury.data import (fetch_viz_icons, read_viz_icons,
                           DATA_DIR as FURY_DATA_DIR)

else:
    warnings.warn(
        "You do not have FURY installed. "
        "Some visualization functions might not work for you. "
        "For installation instructions, please visit: https://fury.gl/")

# We make the visualization requirements optional imports:
_, has_mpl, _ = optional_package(
    'matplotlib',
    "You do not have Matplotlib installed. Some visualization functions "
    "might not work for you. For installation instructions, please visit: "
    "https://matplotlib.org/")

if has_mpl:
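# --- Hedged usage sketch: downstream code should branch on ``has_fury``
# instead of importing fury directly ---
from dipy.viz import has_fury

if has_fury:
    from dipy.viz import window
    scene = window.Scene()  # fury re-exported through dipy.viz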
from __future__ import division

from warnings import warn

import numpy as np
from scipy.special import gamma, hyp1f1
from scipy.optimize import leastsq

from dipy.reconst.cache import Cache
from dipy.reconst.multi_voxel import multi_voxel_fit
from dipy.reconst.csdeconv import csdeconv
from dipy.reconst.shm import real_sph_harm
from dipy.core.geometry import cart2sphere
from dipy.data import get_sphere
from dipy.reconst.odf import OdfModel, OdfFit
from dipy.utils.optpkg import optional_package

cvxpy, have_cvxpy, _ = optional_package("cvxpy")


class ForecastModel(OdfModel, Cache):
    r"""Fiber ORientation Estimated using Continuous Axially Symmetric
    Tensors (FORECAST) [1,2,3]_. FORECAST is a Spherical Deconvolution
    reconstruction model for multi-shell diffusion data which enables the
    calculation of a voxel adaptive response function using the Spherical
    Mean Technique (SMT) [2,3]_.

    With FORECAST it is possible to calculate crossing invariant parallel
    diffusivity, perpendicular diffusivity, mean diffusivity, and
    fractional anisotropy [2]_.

    References
    ----------
    .. [1] Anderson A. W., "Measurement of Fiber Orientation Distributions
from scipy.ndimage.interpolation import map_coordinates
from scipy.spatial.distance import mahalanobis

from dipy.utils.optpkg import optional_package
from dipy.io.image import load_nifti
from dipy.io.streamline import load_tractogram
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import AveragePointwiseEuclideanMetric
from dipy.io.peaks import load_peaks
from dipy.tracking.streamline import (set_number_of_points,
                                      values_from_volume,
                                      orient_by_streamline,
                                      transform_streamlines,
                                      Streamlines)

pd, have_pd, _ = optional_package("pandas")
_, have_tables, _ = optional_package("tables")

if have_pd:
    import pandas as pd


def _save_hdf5(fname, dt, col_name, col_size=5):
    """ Saves the given input dataframe to .h5 file

    Parameters
    ----------
    fname : string
        file name for saving the hdf5 file
    dt : Pandas DataFrame
        DataFrame to be saved as .h5 file
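# --- Hedged usage sketch: the public profiling functions built on these
# imports are ``gaussian_weights`` and ``afq_profile`` (from
# dipy.stats.analysis); ``data``, ``bundle`` and ``affine`` are assumed
# to exist, so the calls are left commented ---
# from dipy.stats.analysis import afq_profile, gaussian_weights
# weights = gaussian_weights(bundle)
# profile = afq_profile(data, bundle, affine, weights=weights)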
import numpy as np

from dipy.utils.optpkg import optional_package

matplotlib, has_mpl, setup_module = optional_package("matplotlib")
plt, _, _ = optional_package("matplotlib.pyplot")


def _tile_plot(imgs, titles, **kwargs):
    """ Helper function
    """
    # Create a new figure and plot the three images
    fig, ax = plt.subplots(1, len(imgs))
    for ii, a in enumerate(ax):
        a.set_axis_off()
        a.imshow(imgs[ii], **kwargs)
        a.set_title(titles[ii])

    return fig


def overlay_images(img0, img1, title0='', title_mid='', title1='',
                   fname=None):
    r""" Plot two images one on top of the other using red and green channels.

    Creates a figure containing three images: the first image to the left
    plotted on the red channel of a color image, the second to the right
    plotted on the green channel of a color image, and the two given images
    on top of each other using the red channel for the first image and the
    green channel for the second one. It is assumed that both images have
    the same shape. The intended use of this function is to visually assess
    the quality of a registration result.
from __future__ import division, print_function, absolute_import

import numpy as np
from nibabel.affines import apply_affine

from dipy.viz.colormap import colormap_lookup_table, create_colormap
from dipy.viz.utils import lines_to_vtk_polydata
from dipy.viz.utils import set_input

# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
colors, have_vtk_colors, _ = optional_package('vtk.util.colors')
numpy_support, have_ns, _ = optional_package('vtk.util.numpy_support')

if have_vtk:
    version = vtk.vtkVersion.GetVTKSourceVersion().split(' ')[-1]
    major_version = vtk.vtkVersion.GetVTKMajorVersion()


def slicer(data, affine=None, value_range=None, opacity=1.,
           lookup_colormap=None, interpolation='linear',
           picking_tol=0.025):
    """ Cuts 3D scalar or rgb volumes into 2D images
try:
    import tkFileDialog as filedialog
except ImportError:
    from tkinter import filedialog

# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package

from dipy import __version__ as dipy_version
from dipy.utils.six import string_types

# import vtk
# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
colors, have_vtk_colors, _ = optional_package('vtk.util.colors')
numpy_support, have_ns, _ = optional_package('vtk.util.numpy_support')
_, have_imread, _ = optional_package('Image')

if have_vtk:
    version = vtk.vtkVersion.GetVTKSourceVersion().split(' ')[-1]
    major_version = vtk.vtkVersion.GetVTKMajorVersion()
    from vtk.util.numpy_support import vtk_to_numpy
    vtkRenderer = vtk.vtkRenderer
else:
    vtkRenderer = object

if have_imread:
    from scipy.misc import imread
import numpy as np

# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package

# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')


def colormap_lookup_table(scale_range=(0, 1), hue_range=(0.8, 0),
                          saturation_range=(1, 1), value_range=(0.8, 0.8)):
    """ Lookup table for the colormap

    Parameters
    ----------
    scale_range : tuple
        It can be anything e.g. (0, 1) or (0, 255). Usually it is the
        minimum and maximum value of your data. Default is (0, 1).
    hue_range : tuple of floats
        HSV values (min 0 and max 1). Default is (0.8, 0).
    saturation_range : tuple of floats
        HSV values (min 0 and max 1). Default is (1, 1).
    value_range : tuple of floats
        HSV value (min 0 and max 1). Default is (0.8, 0.8).

    Returns
    -------
    lookup_table : vtkLookupTable
    """
    lookup_table = vtk.vtkLookupTable()
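# --- Hedged usage sketch for ``colormap_lookup_table`` (needs vtk; the
# LUT is typically handed to line/streamtube actors via a
# ``lookup_colormap`` argument) ---
if have_vtk:
    lut = colormap_lookup_table(scale_range=(0, 255))
    rgb = [0.0, 0.0, 0.0]
    lut.GetColor(128, rgb)  # fill ``rgb`` with the mapped color in-place
    print(rgb)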
from scipy.ndimage.interpolation import map_coordinates
from scipy.spatial.distance import mahalanobis

from dipy.utils.optpkg import optional_package
from dipy.io.image import load_nifti
from dipy.io.streamline import load_trk
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import AveragePointwiseEuclideanMetric
from dipy.io.peaks import load_peaks
from dipy.tracking.streamline import (set_number_of_points,
                                      values_from_volume,
                                      orient_by_streamline,
                                      transform_streamlines,
                                      Streamlines)

pd, have_pd, _ = optional_package("pandas")
_, have_tables, _ = optional_package("tables")

if have_pd:
    import pandas as pd


def _save_hdf5(fname, dt, col_name, col_size=5):
    """ Saves the given input dataframe to .h5 file

    Parameters
    ----------
    fname : string
        file name for saving the hdf5 file
    dt : Pandas DataFrame
        DataFrame to be saved as .h5 file
from os.path import join

import nibabel as nib
import numpy as np
import numpy.testing as npt
from numpy.testing import run_module_suite, assert_raises

from nibabel.tmpdirs import TemporaryDirectory

from dipy.utils.optpkg import optional_package
from dipy.io.streamline import save_trk
from dipy.tracking.streamline import Streamlines
from dipy.testing import assert_true
from dipy.io.image import save_nifti
from dipy.data import get_fnames
from dipy.workflows.stats import SNRinCCFlow
from dipy.workflows.stats import BundleAnalysisPopulationFlow
from dipy.workflows.stats import LinearMixedModelsFlow

pd, have_pandas, _ = optional_package("pandas")
_, have_statsmodels, _ = optional_package("statsmodels")
_, have_tables, _ = optional_package("tables")


def test_stats():
    with TemporaryDirectory() as out_dir:
        data_path, bval_path, bvec_path = get_fnames('small_101D')
        vol_img = nib.load(data_path)
        volume = vol_img.get_data()
        mask = np.ones_like(volume[:, :, :, 0])
        mask_img = nib.Nifti1Image(mask.astype(np.uint8), vol_img.affine)
        mask_path = join(out_dir, 'tmp_mask.nii.gz')
        nib.save(mask_img, mask_path)
        snr_flow = SNRinCCFlow(force=True)