def __init__(self, pairwise_metric='correlation', center_data=False, square=False, **kwargs):
    """Initialize the dissimilarity measure.

    Parameters
    ----------
    pairwise_metric : str
        Distance metric used for calculating pairwise vector distances
        for the dissimilarity matrix (DSM).  Any metric accepted by
        scipy.spatial.distance.pdist is valid.  (Default: 'correlation',
        i.e. one minus Pearson correlation.)
    center_data : bool, optional
        If True, center each column of the data matrix by subtracting
        the column mean from each element (per chunk if chunks_attr is
        specified).  Recommended especially when using
        pairwise_metric='correlation'.  (Default: False)
    square : bool, optional
        If True return the square distance matrix; if False return the
        flattened lower triangle.  (Default: False)

    Returns
    -------
    Dataset
        A column vector of length n(n-1)/2 of pairwise distances between
        all samples if square is False; the square dissimilarity matrix
        if square is True.
    """
    # Initialize the base class before recording configuration.
    Measure.__init__(self, **kwargs)
    self.square = square
    self.center_data = center_data
    self.pairwise_metric = pairwise_metric
def __init__(self, xSs_behav, targ_comp, comparison_metric='pearson', chunks_attr='chunks', **kwargs):
    """Initialize the measure.

    Parameters
    ----------
    xSs_behav : dict
        Behavioral value between subjects to be correlated with
        intrasubject neural similarity (subjects are keys).
    targ_comp : list
        Targets whose similarity is correlated with xSs_behav.
    comparison_metric : str
        Distance measure for the behavioral-to-neural comparison:
        'pearson' (default) or 'spearman'.
    chunks_attr : str
        Chunks attribute to use for chunking the dataset.  Can be any
        samples attribute specified in the dataset.sa dict.
        (Default: 'chunks')

    Returns
    -------
    Dataset
        Contains an array of the pairwise correlations between the DSMs
        defined for each chunk of the dataset.  The array has length
        N(N-1)/2 for N chunks.

    TODO
    ----
    Another metric for the consistency metric could be the "Rv"
    coefficient... (ac)
    """
    # Initialize the base class first, then record configuration.
    Measure.__init__(self, **kwargs)
    self.xSs_behav = xSs_behav
    self.targ_comp = targ_comp
    self.comparison_metric = comparison_metric
    self.chunks_attr = chunks_attr
def __init__(self, dsmatrix, dset_metric, output_metric='spearman'):
    """Record the reference DSM and metric configuration.

    Parameters
    ----------
    dsmatrix : array-like
        Dissimilarity matrix, stored as given.
    dset_metric : str
        Metric for computing the dataset DSM (stored as given).
    output_metric : str
        Comparison measure; presumably a correlation method --
        'spearman' by default.
    """
    Measure.__init__(self)
    # Cache for the dataset DSM; populated when the measure is computed.
    self.dset_dsm = []
    self.dsmatrix = dsmatrix
    self.dset_metric = dset_metric
    self.output_metric = output_metric
def __init__(self, queryengine, roi_ids=None, nproc=None, **kwargs):
    """
    Parameters
    ----------
    queryengine : QueryEngine
        Engine to use to discover the "neighborhood" of each feature.
        See :class:`~mvpa2.misc.neighborhood.QueryEngine`.
    roi_ids : None or list(int) or str
        List of feature ids (not coordinates) that shall serve as ROI
        seeds (e.g. sphere centers).  Alternatively, this can be the
        name of a feature attribute of the input dataset, whose non-zero
        values determine the feature ids.  By default all features will
        be used.
    nproc : None or int
        How many processes to use for computation.  Requires the
        `pprocess` external module.  If None -- all available cores will
        be used.
    **kwargs
        In addition this class supports all keyword arguments of its
        base-class :class:`~mvpa2.measures.base.Measure`.
    """
    Measure.__init__(self, **kwargs)
    # pprocess is only required when more than one worker was requested.
    if nproc is not None and nproc > 1 and not externals.exists('pprocess'):
        raise RuntimeError("The 'pprocess' module is required for "
                           "multiprocess searchlights. Please either "
                           "install python-pprocess, or reduce `nproc` "
                           "to 1 (got nproc=%i) or set to default None" % nproc)
    self._queryengine = queryengine
    # Reject an explicitly empty list of seeds; None and attribute-name
    # strings are both acceptable.
    empty_roi_list = (roi_ids is not None
                      and not isinstance(roi_ids, str)
                      and not len(roi_ids))
    if empty_roi_list:
        raise ValueError("Cannot run searchlight on an empty list of roi_ids")
    self.__roi_ids = roi_ids
    self.nproc = nproc
def __init__(self, dsmatrix, dset_metric, output_metric="spearman"):
    """Store the DSM and metric settings for later computation.

    Parameters
    ----------
    dsmatrix : array-like
        Dissimilarity matrix (stored unchanged).
    dset_metric : str
        Metric for the dataset DSM (stored unchanged).
    output_metric : str
        Comparison measure; 'spearman' by default.
    """
    Measure.__init__(self)
    self.output_metric = output_metric
    self.dset_metric = dset_metric
    self.dsmatrix = dsmatrix
    # Will hold the dataset DSM once computed.
    self.dset_dsm = []
def __init__(self, axis, fx, other_axis_prefix=None, **kwargs):
    '''
    Parameters
    ----------
    axis: str or int
        'samples' (or 0) or 'features' (or 1).
    fx: callable
        function to determine the winner. When called with a dataset ds,
        it should return a vector with ds.nsamples values
        (if axis=='features') or ds.nfeatures values (if axis=='samples').
    other_axis_prefix: str
        prefix used for feature or sample attributes set on the other axis.
    '''
    Measure.__init__(self, **kwargs)
    # IMPROVED: use isinstance() instead of `type(axis) is str` so str
    # subclasses are accepted too, and the idiomatic `x not in y` form.
    if isinstance(axis, str):
        str2num = dict(samples=0, features=1)
        if axis not in str2num:
            raise ValueError("Illegal axis: should be %s"
                             % ' or '.join(str2num))
        axis = str2num[axis]
    elif axis not in (0, 1):
        raise ValueError("Illegal axis: should be 0 or 1")
    self.__axis = axis
    self.__fx = fx
    self.__other_axis_prefix = other_axis_prefix
def __init__(self, queryengine, roi_ids=None, nproc=None, **kwargs):
    """
    Parameters
    ----------
    queryengine : QueryEngine
        Engine to use to discover the "neighborhood" of each feature.
        See :class:`~mvpa2.misc.neighborhood.QueryEngine`.
    roi_ids : None or list(int) or str
        List of feature ids (not coordinates) that shall serve as ROI
        seeds (e.g. sphere centers).  Alternatively, this can be the
        name of a feature attribute of the input dataset, whose non-zero
        values determine the feature ids.  By default all features will
        be used.
    nproc : None or int
        How many processes to use for computation.  Requires the
        `pprocess` external module.  If None -- all available cores will
        be used.
    **kwargs
        In addition this class supports all keyword arguments of its
        base-class :class:`~mvpa2.measures.base.Measure`.
    """
    Measure.__init__(self, **kwargs)
    if nproc is not None and nproc > 1 and not externals.exists('pprocess'):
        raise RuntimeError("The 'pprocess' module is required for "
                           "multiprocess searchlights. Please either "
                           "install python-pprocess, or reduce `nproc` "
                           "to 1 (got nproc=%i)" % nproc)
    self._queryengine = queryengine
    # BUG FIX: the original used Python 2-only `raise ValueError, "..."`
    # statement syntax, which is a SyntaxError under Python 3 (this code
    # base already uses Python 3-only f-strings elsewhere).
    if roi_ids is not None and not isinstance(roi_ids, str) \
       and not len(roi_ids):
        raise ValueError(
            "Cannot run searchlight on an empty list of roi_ids")
    self.__roi_ids = roi_ids
    self.nproc = nproc
def __init__(self, dset_metric, nsubjs, compare_ave, k, **kwargs):
    """Record configuration for the measure.

    Parameters
    ----------
    dset_metric : str
        Metric for the dataset DSM (stored as given).
    nsubjs : int
        Number of subjects -- presumably; confirm against callers.
    compare_ave :
        Stored as given; semantics not evident from this block.
    k :
        Stored as given; semantics not evident from this block.
    """
    Measure.__init__(self, **kwargs)
    # Will hold the dataset DSM once the measure runs.
    self.dset_dsm = []
    self.dset_metric = dset_metric
    self.nsubjs = nsubjs
    self.compare_ave = compare_ave
    self.k = k
def __init__(self, model, n_ev, mat, nuisance, scan_onsets, min_voxels=10):
    """Record model configuration.

    Parameters
    ----------
    model :
        Model object/spec, stored as given.
    n_ev : int
        Number of events; used to derive the number of unique pairs.
    mat, nuisance, scan_onsets :
        Design inputs, stored as given.
    min_voxels : int
        Minimum voxel count threshold. (Default: 10)
    """
    Measure.__init__(self)
    self.model = model
    self.n_ev = n_ev
    # Number of unique off-diagonal pairs among n_ev events:
    # n_ev*(n_ev-1)/2, algebraically identical to (n_ev**2 - n_ev)//2.
    self.n = n_ev * (n_ev - 1) // 2
    self.mat = mat
    self.nuisance = nuisance
    self.scan_onsets = scan_onsets
    self.min_voxels = min_voxels
def __init__(self, **kwargs):
    """
    Returns
    -------
    Dataset
        If square is False, contains a column vector of length n(n-1)/2
        of pairwise distances between all samples; a sample attribute
        ``pairs`` identifies the indices of the input samples for each
        individual pair.  If square is True, the dataset contains a
        square dissimilarity matrix along with the entire sample
        attributes collection of the input dataset.
    """
    # All configuration is handled by the base class.
    Measure.__init__(self, **kwargs)
def __init__(self, targs_comps, sample_covariable, pairwise_metric='correlation', comparison_metric='pearson', center_data=False, corrcoef_only=False, **kwargs):
    """Initialize

    Parameters
    ----------
    dataset : Dataset
        Dataset with N samples such that the corresponding dissimilarity
        matrix has N*(N-1)/2 unique pairwise distances.
    targs_comps : dict
        Trial-by-trial targets (keys) and their comparison targets
        (values) -- ***this measure assumes other omitted first***
    sample_covariable : str
        Name of the variable (sample attribute) with a value for each
        sample.  The distance of each sample to the comparison sample
        will be correlated with this variable.
    pairwise_metric : str
        Used by pdist to calculate the dataset DSM.
        Default: 'correlation'; see scipy.spatial.distance.pdist for
        other metric options.
    comparison_metric : str
        Used for comparing the dataset DSM with the target DSM.
        Default: 'pearson'.  Options: 'pearson' or 'spearman'.
    center_data : bool
        Center data by subtracting mean column values from columns
        prior to calculating the dataset DSM.  Default: False
    corrcoef_only : bool
        If True, return only the correlation coefficient (rho);
        otherwise return rho and the probability p.  Default: False

    Returns
    -------
    Dataset
        Contains the correlation coefficient (rho) only, or rho plus p
        when corrcoef_only is set to False.

    TODO
    ----
    - Should this be done as repeated-measures ANCOVA instead?
    - Does not currently handle rho comparison of samples, or rho
      correlation with the covariable.
    - Should use mean_group_sample in a wrapper function to get the
      comparison sample.  Maybe have omit inside this method?
    """
    # Initialize the base class before validating configuration.
    Measure.__init__(self, **kwargs)
    if comparison_metric not in ['spearman', 'pearson']:
        raise Exception("comparison_metric %s is not in "
                        "['spearman','pearson']" % comparison_metric)
    self.targs_comps = targs_comps
    self.sample_covariable = sample_covariable
    self.pairwise_metric = pairwise_metric
    self.comparison_metric = comparison_metric
    self.center_data = center_data
    self.corrcoef_only = corrcoef_only
def __init__(self, **kwargs):
    """
    Returns
    -------
    Dataset
        The pairwise correlations between the DSMs computed from each
        chunk of the input dataset.  If square is False this is a
        column vector of length N(N-1)/2 for N chunks; if square is
        True it is a square NxN matrix for N chunks.
    """
    # TODO: the "Rv" coefficient could serve as another consistency
    # metric... (ac)
    # All configuration is handled by the base class.
    Measure.__init__(self, **kwargs)
def __init__(self, target_dsm, control_dsms=None, resid=False, pairwise_metric='correlation', comparison_metric='pearson', center_data=False, corrcoef_only=False, **kwargs):
    """Initialize

    Parameters
    ----------
    dataset : Dataset
        Dataset with N samples such that the corresponding dissimilarity
        matrix has N*(N-1)/2 unique pairwise distances.
    target_dsm : numpy array, length N*(N-1)/2
        Target dissimilarity matrix; this is the predictor whose results
        get mapped back.
    control_dsms : list of numpy arrays, each length N*(N-1)/2
        DSMs to be controlled for when getting the results of target_dsm
        back.  Default: None
    resid : bool
        Set to True to return residuals to the searchlight center for
        smoothing estimation.  Default: False
    pairwise_metric : str
        Used by pdist to calculate the dataset DSM.
        Default: 'correlation'; see scipy.spatial.distance.pdist for
        other metric options.
    comparison_metric : str
        Used for comparing the dataset DSM with the target DSM.
        Default: 'pearson'.  Options: 'pearson' or 'spearman'.
    center_data : bool
        Center data by subtracting mean column values from columns
        prior to calculating the dataset DSM.  Default: False
    corrcoef_only : bool
        If True, return only the correlation coefficient (rho);
        otherwise return rho and the probability p.  Default: False

    Returns
    -------
    Dataset
        Contains the correlation coefficient (rho) only, or rho plus p
        when corrcoef_only is set to False.
    """
    # init base classes first
    Measure.__init__(self, **kwargs)
    if comparison_metric not in ['spearman', 'pearson']:
        raise Exception("comparison_metric %s is not in "
                        "['spearman','pearson']" % comparison_metric)
    self.target_dsm = target_dsm
    # Spearman comparison operates on ranks, so pre-rank the target DSM.
    if comparison_metric == 'spearman':
        self.target_dsm = rankdata(target_dsm)
    self.pairwise_metric = pairwise_metric
    self.comparison_metric = comparison_metric
    self.center_data = center_data
    self.corrcoef_only = corrcoef_only
    self.control_dsms = control_dsms
    # BUG FIX: was `control_dsms != None`; comparing against None with
    # `!=` is unidiomatic and breaks outright if a numpy array is passed
    # (elementwise comparison yields an ambiguous truth value).  Use an
    # identity test instead.
    if comparison_metric == 'spearman' and control_dsms is not None:
        self.control_dsms = [rankdata(dm) for dm in control_dsms]
    self.resid = resid
def __init__(self, vols, item_comp, stat, contrasts, n_perm=1000, min_voxels=10):
    """Prepare permutation indices for the triad-vector statistic.

    Parameters
    ----------
    vols, item_comp :
        Passed through to rsa.prep_triad_vector (stored there, not here).
    stat : str
        Statistic to compute: 'vector' or 'vectri'.
    contrasts :
        Stored as given.
    n_perm : int
        Number of permutations. (Default: 1000)
    min_voxels : int
        Minimum voxel count threshold. (Default: 10)

    Raises
    ------
    ValueError
        If ``stat`` is not one of 'vector' or 'vectri'.
    """
    Measure.__init__(self)
    # Translate the requested statistic into the permutation scheme
    # expected by rsa.prep_triad_vector.
    stat_to_perm = {'vector': 'item_type', 'vectri': 'triad'}
    if stat not in stat_to_perm:
        raise ValueError(f'Unknown stat: {stat}')
    train_ind, test_ind = rsa.prep_triad_vector(
        vols, item_comp, n_perm, perm_type=stat_to_perm[stat])
    self.train_ind = train_ind
    self.test_ind = test_ind
    self.stat = stat
    self.contrasts = contrasts
    self.min_voxels = min_voxels
def __init__(self, target_dsm, **kwargs):
    """
    Parameters
    ----------
    target_dsm : array (length N*(N-1)/2)
        Target dissimilarity matrix.

    Returns
    -------
    Dataset
        If ``corrcoef_only`` is True, contains a single feature: the
        correlation coefficient (rho); otherwise two features: rho and p.
    """
    # Initialize base classes before storing the target DSM.
    Measure.__init__(self, **kwargs)
    self.target_dsm = target_dsm
    # Spearman comparison operates on ranks, so pre-rank the target DSM.
    if self.params.comparison_metric == 'spearman':
        self.target_dsm = rankdata(target_dsm)
def __init__(self, target_dsm, partial_dsm=None, pairwise_metric='correlation', comparison_metric='pearson', center_data=False, corrcoef_only=False, **kwargs):
    """Initialize

    Parameters
    ----------
    dataset : Dataset
        Dataset with N samples such that the corresponding dissimilarity
        matrix has N*(N-1)/2 unique pairwise distances.
    target_dsm : numpy array, length N*(N-1)/2
        Target dissimilarity matrix.
    partial_dsm : numpy array, length N*(N-1)/2
        DSM to be partialled out.  Default: None; assumes use of
        pcorr.py.
    pairwise_metric : str
        Used by pdist to calculate the dataset DSM.
        Default: 'correlation'; see scipy.spatial.distance.pdist for
        other metric options.
    comparison_metric : str
        Used for comparing the dataset DSM with the target DSM.
        Default: 'pearson'.  Options: 'pearson' or 'spearman'.
    center_data : bool
        Center data by subtracting mean column values from columns
        prior to calculating the dataset DSM.  Default: False
    corrcoef_only : bool
        If True, return only the correlation coefficient (rho);
        otherwise return rho and the probability p.  Default: False

    Returns
    -------
    Dataset
        Contains the correlation coefficient (rho) only, or rho plus p
        when corrcoef_only is set to False.
    """
    # init base classes first
    Measure.__init__(self, **kwargs)
    if comparison_metric not in ['spearman', 'pearson']:
        raise Exception("comparison_metric %s is not in "
                        "['spearman','pearson']" % comparison_metric)
    self.target_dsm = target_dsm
    # Spearman comparison operates on ranks, so pre-rank the target DSM.
    if comparison_metric == 'spearman':
        self.target_dsm = rankdata(target_dsm)
    self.pairwise_metric = pairwise_metric
    self.comparison_metric = comparison_metric
    self.center_data = center_data
    self.corrcoef_only = corrcoef_only
    self.partial_dsm = partial_dsm
    # BUG FIX: was `partial_dsm != None`.  partial_dsm is documented as
    # a numpy array, for which `!= None` performs an elementwise
    # comparison and `if` then raises "truth value of an array is
    # ambiguous".  Use an identity test instead.
    if comparison_metric == 'spearman' and partial_dsm is not None:
        self.partial_dsm = rankdata(partial_dsm)
def __init__(self, target_dsm, pairwise_metric='correlation', comparison_metric='pearson', center_data=False, corrcoef_only=False, **kwargs):
    """Initialize

    Parameters
    ----------
    dataset : Dataset
        Dataset with N samples such that the corresponding dissimilarity
        matrix has N*(N-1)/2 unique pairwise distances.
    target_dsm : numpy array, length N*(N-1)/2
        Target dissimilarity matrix.
    pairwise_metric : str
        Used by pdist to calculate the dataset DSM.
        Default: 'correlation'; see scipy.spatial.distance.pdist for
        other metric options.
    comparison_metric : str
        Used for comparing the dataset DSM with the target DSM.
        Default: 'pearson'.  Options: 'pearson' or 'spearman'.
    center_data : bool
        Center data by subtracting mean column values from columns
        prior to calculating the dataset DSM.  Default: False
    corrcoef_only : bool
        If True, return only the correlation coefficient (rho);
        otherwise return rho and the probability p.  Default: False

    Returns
    -------
    Dataset
        Contains the correlation coefficient (rho) only, or rho plus p
        when corrcoef_only is set to False.
    """
    # Initialize the base class, then validate before storing anything.
    Measure.__init__(self, **kwargs)
    if comparison_metric not in ['spearman', 'pearson']:
        raise Exception("comparison_metric %s is not in "
                        "['spearman','pearson']" % comparison_metric)
    # Spearman comparison operates on ranks, so pre-rank the target DSM.
    if comparison_metric == 'spearman':
        self.target_dsm = rankdata(target_dsm)
    else:
        self.target_dsm = target_dsm
    self.pairwise_metric = pairwise_metric
    self.comparison_metric = comparison_metric
    self.center_data = center_data
    self.corrcoef_only = corrcoef_only
def __init__(self, pairs_dsm, pairwise_metric='correlation', comparison_metric='pearson', center_data=False, corrcoef_only=False, **kwargs):
    """Initialize

    Parameters
    ----------
    dataset : Dataset
        Dataset with N samples such that the corresponding dissimilarity
        matrix has N*(N-1)/2 unique pairwise distances.
    pairs_dsm : dict
        Target pairs separated by '-' (keys) and corresponding predicted
        model dissimilarity values (values).
    pairwise_metric : str
        Used by pdist to calculate the dataset DSM.
        Default: 'correlation'; see scipy.spatial.distance.pdist for
        other metric options.
    comparison_metric : str
        Used for comparing the dataset DSM with the target DSM.
        Default: 'pearson'.  Options: 'pearson', 'spearman', or
        'euclidean'.
    center_data : bool
        Center data by subtracting mean column values from columns
        prior to calculating the dataset DSM.  Default: False
    corrcoef_only : bool
        If True, return only the correlation coefficient (rho);
        otherwise return rho and the probability p.  Default: False

    Returns
    -------
    Dataset
        Contains the correlation coefficient (rho) only, or rho plus p
        when corrcoef_only is set to False.

    TODO
    ----
    Add partial correlation and multiple regression RSA.
    """
    # Initialize the base class, then validate the comparison metric.
    Measure.__init__(self, **kwargs)
    if comparison_metric not in ['spearman', 'pearson', 'euclidean']:
        raise Exception("comparison_metric %s is not in "
                        "['spearman','pearson','euclidean']" % comparison_metric)
    self.pairs_dsm = pairs_dsm
    self.pairwise_metric = pairwise_metric
    self.comparison_metric = comparison_metric
    self.center_data = center_data
    self.corrcoef_only = corrcoef_only
    # Split each 'a-b' key into its two target names.
    self.pairs = [pair_key.split('-') for pair_key in self.pairs_dsm]
def __init__(self, chunks_attr='chunks', pairwise_metric='correlation', consistency_metric='pearson', center_data=False, **kwargs):
    """Initialize

    Parameters
    ----------
    chunks_attr : str
        Chunks attribute to use for chunking the dataset.  Can be any
        samples attribute specified in the dataset.sa dict.
        (Default: 'chunks')
    pairwise_metric : str
        Distance metric for calculating dissimilarity matrices from the
        set of samples in each chunk.  See spatial.distance.pdist for
        all possible metrics.  (Default: 'correlation', i.e. one minus
        Pearson correlation.)
    consistency_metric : str
        Correlation measure for the correlation between dissimilarity
        matrices: 'pearson' (default) or 'spearman'.
    center_data : bool, optional
        If True, center each column of the data matrix by subtracting
        the column mean from each element (per chunk if chunks_attr is
        specified).  Recommended especially when using
        pairwise_metric='correlation'.  (Default: False)

    Returns
    -------
    Dataset
        Contains an array of the pairwise correlations between the DSMs
        defined for each chunk of the dataset.  The array has length
        N(N-1)/2 for N chunks.

    TODO
    ----
    Another metric for the consistency metric could be the "Rv"
    coefficient... (ac)
    """
    # Initialize the base class, then record configuration.
    Measure.__init__(self, **kwargs)
    self.chunks_attr = chunks_attr
    self.pairwise_metric = pairwise_metric
    self.consistency_metric = consistency_metric
    self.center_data = center_data
def __init__(self, pairs, pairwise_metric='correlation', **kwargs):
    """Initialize

    Parameters
    ----------
    dataset : Dataset
        Dataset with N samples such that the corresponding dissimilarity
        matrix has N*(N-1)/2 unique pairwise distances.  Make sure it is
        in alphabetical order!
    pairs : list of lists
        Pairs of target names.
    pairwise_metric : str
        Used by pdist to calculate the dataset DSM.
        Default: 'correlation'; see scipy.spatial.distance.pdist for
        other metric options.

    Returns
    -------
    Dataset
        Contains the sim value.
    """
    # Initialize the base class, then record configuration.
    Measure.__init__(self, **kwargs)
    self.pairwise_metric = pairwise_metric
    self.pairs = pairs
def __init__(self, target_dsm, control_dsms=None, **kwargs):
    """
    Parameters
    ----------
    target_dsm : array (length N*(N-1)/2)
        Target dissimilarity matrix.
    control_dsms : list of arrays (each length N*(N-1)/2)
        Dissimilarity matrices to control for in multiple regression;
        a flexible number is allowed.  *Optional.  Returns r/rho
        coefficients for target_dsm, controlling for these DSMs.

    Returns
    -------
    Dataset
        If ``corrcoef_only`` is True, contains a single feature: the
        correlation coefficient (rho); otherwise two features: rho and p.
    """
    # init base classes first
    Measure.__init__(self, **kwargs)
    self.target_dsm = target_dsm
    self.control_dsms = control_dsms
    # Spearman comparison operates on ranks, so pre-rank the DSMs.
    if self.params.comparison_metric == 'spearman':
        self.target_dsm = rankdata(target_dsm)
        # BUG FIX: was `control_dsms != None`; comparing against None
        # with `!=` is unidiomatic and breaks if a numpy array is ever
        # passed (elementwise comparison -> ambiguous truth value).
        if control_dsms is not None:
            self.control_dsms = [rankdata(dm) for dm in control_dsms]
def __init__(self):
    """Initialize the measure with automatic training enabled."""
    Measure.__init__(self, auto_train=True)
def __init__(self, **kwargs):
    """Forward all keyword arguments to the Measure base class."""
    Measure.__init__(self, **kwargs)
def __init__(self, model_rdms, n_perm, min_voxels=10):
    """Prepare permutation-based RSA state.

    Parameters
    ----------
    model_rdms : sequence
        Model RDMs; their count is recorded as n_model.
    n_perm : int
        Number of permutations passed to prsa.init_pRSA.
    min_voxels : int
        Minimum voxel count threshold. (Default: 10)
    """
    Measure.__init__(self)
    self.n_model = len(model_rdms)
    self.min_voxels = min_voxels
    # Permutation state prepared once up front by the pRSA helper.
    self.perm = prsa.init_pRSA(n_perm, model_rdms)
def __init__(self, vols, n_perm=1000):
    """Store volumes and precompute permuted condition indices.

    Parameters
    ----------
    vols :
        Volumes, stored as given and passed to rsa.prep_triad_delta.
    n_perm : int
        Number of permutations. (Default: 1000)
    """
    Measure.__init__(self)
    self.vols = vols
    # Condition indices for all permutations, computed once up front.
    self.cond_ind = rsa.prep_triad_delta(vols, n_perm)
def __init__(self, dtype, **kwargs):
    """Record the target dtype; other arguments go to the base class.

    Parameters
    ----------
    dtype :
        Stored as given (presumably a numpy dtype; confirm at call
        sites).
    """
    Measure.__init__(self, **kwargs)
    self.dtype = dtype
def __init__(self, model='regression', cthresh=0.10):
    """Record model name and cluster/correlation threshold.

    Parameters
    ----------
    model : str
        Model identifier. (Default: 'regression')
    cthresh : float
        Threshold value; semantics depend on the computing method.
        (Default: 0.10)
    """
    Measure.__init__(self)
    self.model = model
    self.cthresh = cthresh
def __init__(self, **kwargs):
    """Initialize the base class and clear the cached training dataset."""
    Measure.__init__(self, **kwargs)
    # Populated later, presumably during training.
    self._train_ds = None
def __init__(self, metric='pearson', space='targets', **kwargs):
    """Record metric and space settings.

    Parameters
    ----------
    metric : str
        Comparison metric name. (Default: 'pearson')
    space : str
        Samples attribute defining the space. (Default: 'targets')
    """
    Measure.__init__(self, **kwargs)
    self.space = space
    self.metric = metric