# Module aliases assumed from the calls below: neurosynth base utilities and numpy.
import numpy as np
import neurosynth.base.imageutils as nbi
import neurosynth.base.mask as nbm
import neurosynth.base.transformations as nbt


def peaks_to_vector(coordinates,
                    mask='/scratch/02863/mparikh/data/MNI152_T1_2mm_brain.nii.gz',
                    radius=10):
    """ Takes a list of valid peak coordinates and returns a vector of the
    corresponding image.

    Parameters
    ----------
    coordinates : list of lists
        list of x/y/z coordinates
    mask : str
        path to a mask image in NIfTI format used to vectorize the image
    radius : int, optional
        radius of the sphere to expand around each peak, in mm. Defaults
        to 10 mm.

    Returns
    -------
    ndarray
        1D Numpy array of in-mask voxel values
    """
    # transform the coordinates from world (x/y/z) space to matrix space
    new_coordinates = nbt.xyz_to_mat(np.array(coordinates))
    # build the denser image by expanding a sphere of the given radius around each peak
    dense_img = nbi.map_peaks_to_image(new_coordinates, r=radius)
    # create a mask object for the image
    niftiMask = nbm.Mask(mask)
    # mask the image and return the in-mask voxel vector
    return niftiMask.mask(dense_img)
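
# Illustrative usage sketch (not part of the original source): the peak
# coordinates and mask path below are hypothetical stand-ins.
#
# >>> peaks = [[-38, -22, 52], [40, -20, 50]]   # two example MNI peaks
# >>> vec = peaks_to_vector(peaks, mask='MNI152_T1_2mm_brain.nii.gz', radius=10)
# >>> vec.nonzero()[0].size   # number of in-mask voxels covered by the spheres
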
def __init__(self, dataset=None, method='pearson', features=None, mask=None,
             image_type='pFgA_z', threshold=0.001):
    """ Initialize a new Decoder instance.

    Args:
        dataset: An optional Dataset instance containing features to use in
            decoding.
        method: The decoding method to use (optional). By default, Pearson
            correlation.
        features: Optional list of features to use in decoding. If None, use
            all features found in dataset. If features is a list of strings,
            use only the subset of features in the Dataset that are named in
            the list. If features is a list of filenames, ignore the dataset
            entirely and use only the features passed as image files in
            decoding.
        mask: An optional mask to apply to features and input images. If
            None, will use the one in the current Dataset.
        image_type: An optional string indicating the type of image to use
            when constructing feature-based images. See
            meta.analyze_features() for details. By default, uses reverse
            inference z-score images.
        threshold: If decoding from a Dataset instance, this is the feature
            threshold to use to generate the feature maps used in the
            decoding.
    """
    if dataset is not None:
        self.dataset = dataset
    self.method = method.lower()

    # If no mask is passed, use the dataset's.
    if mask is None:
        self.mask = dataset.volume
    else:
        from neurosynth.base import mask as m
        self.mask = m.Mask(mask)

    if features is None:
        features = dataset.get_feature_names()

    self.load_features(features, image_type=image_type, threshold=threshold)
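
# Illustrative usage sketch (not part of the original source): assumes a
# populated Dataset instance; the feature names and image filename are
# hypothetical.
#
# >>> decoder = Decoder(dataset, method='pearson',
# ...                   features=['emotion', 'language', 'memory'])
# >>> result = decoder.decode(['my_stat_map.nii.gz'])   # correlate the map with each feature image
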
def __init__(self, filename, feature_filename=None, volume=None, r=6,
             transform=True, target='MNI'):
    """ Initialize a new Dataset instance.

    Creates a new Dataset instance from a text file containing activation
    data. At minimum, the input file must contain tab-delimited columns named
    x, y, z, id, and space (case-insensitive). The x/y/z columns indicate the
    coordinates of the activation center or peak, and the id column is used
    to group multiple activations from a single Mappable (e.g. an article).
    Typically the id should be a uniquely identifying field accessible to
    others, e.g., a doi in the case of entire articles. The space column
    indicates the nominal atlas used to produce each activation. Currently
    all values except 'TAL' (Talairach) will be ignored. If space == TAL and
    the transform argument is True, all activations reported in Talairach
    space will be converted to MNI space using the Lancaster et al transform.

    Args:
        filename: The name of a database file containing a list of
            activations.
        feature_filename: An optional filename to construct a FeatureTable
            from.
        volume: An optional Nifti/Analyze image name defining the space to
            use for all operations. If no image is passed, defaults to the
            MNI152 2 mm template packaged with FSL.
        r: An optional integer specifying the radius of the smoothing kernel,
            in mm. Defaults to 6 mm.
        transform: Optional argument specifying how to handle transformation
            between coordinates reported in different stereotactic spaces.
            When True (default), activations in Talairach (T88) space will be
            converted to MNI space using the Lancaster et al (2007)
            transform; no other transformations will be applied. When False,
            no transformation will be applied. Alternatively, the user can
            pass their own dictionary of named transformations to apply, in
            which case each activation will be checked against the dictionary
            as it is read in and the specified transformation will be applied
            if found (for further explanation, see
            transformations.Transformer).
        target: The name of the target space within which activation
            coordinates are represented. By default, MNI.

    Returns:
        A Dataset instance.
    """
    # Instance properties
    self.r = r

    # Set up transformations between different image spaces
    if transform:
        if not isinstance(transform, dict):
            transform = {'T88': transformations.t88_to_mni(),
                         'TAL': transformations.t88_to_mni()
                         }
        self.transformer = transformations.Transformer(transform, target)
    else:
        self.transformer = None

    # Load mappables
    self.mappables = self._load_mappables_from_txt(filename)

    # Load the volume into a new Mask
    try:
        if volume is None:
            resource_dir = os.path.join(os.path.dirname(__file__),
                                        os.path.pardir, 'resources')
            volume = os.path.join(
                resource_dir, 'MNI152_T1_2mm_brain.nii.gz')
        self.volume = mask.Mask(volume)
    except Exception as e:
        logger.error("Error loading volume %s: %s" % (volume, e))
        # yoh: TODO -- IMHO should re-raise or not even swallow the exception here
        # raise e

    # Create supporting tables for images and features
    self.create_image_table()
    if feature_filename is not None:
        self.feature_table = FeatureTable(self, feature_filename)
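
# Illustrative usage sketch (not part of the original source): the database
# and feature file names are hypothetical stand-ins.
#
# >>> dataset = Dataset('database.txt', feature_filename='features.txt', r=6)
# >>> dataset.volume           # Mask wrapping the default MNI152 2 mm template
# >>> len(dataset.mappables)   # one Mappable per study id in the database file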