def find_best_parameter_each_group(data):
    '''
    Find models of each group with the best beta and K.

    :param data: data dict with training-validation split
    :return: best parameter set
    '''
    num_prescription = len(data['train_x'])
    all_parameters = []
    for i in range(num_prescription):
        x = data['train_x'][i]
        y = data['train_y'][i]
        # Cache the transformer step in a temporary directory so the grid
        # search does not refit it for every parameter combination.
        cachedir = mkdtemp()
        memory = Memory(cachedir=cachedir, verbose=0)
        transformer = DRLRTransformer(solver='gurobi')
        informed_knn = Pipeline([('transformer', transformer),
                                 ('knn', KNeighborsRegressor())],
                                memory=memory)
        # Sample candidate K values evenly on a square-root scale, then
        # square back and de-duplicate. np.linspace needs an integer count,
        # so use floor division here.
        num_train = len(x)
        num_sample_point = min(num_train // 2 - 3, 30)
        sr_point = np.linspace(1, np.sqrt(num_train / 2), num_sample_point)
        knn_space = sorted(set(int(np.square(p)) for p in sr_point))
        parameter_grid = {
            'transformer__reg_l1': [0],
            'transformer__reg_l2': np.logspace(-10, 10, 21),
            'knn__n_neighbors': knn_space,
        }
        estimator_search = GridSearchCV(informed_knn, parameter_grid, cv=5,
                                        scoring='neg_median_absolute_error',
                                        error_score=1, refit=False)
        estimator_search.fit(x, y)
        best_parameters = estimator_search.best_params_
        all_parameters.append([best_parameters['transformer__reg_l2'],
                               best_parameters['knn__n_neighbors']])
        rmtree(cachedir)
    return all_parameters
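# A minimal usage sketch for find_best_parameter_each_group (a hedged
# example, not part of the original module): it assumes DRLRTransformer and
# a working gurobi install are importable, and infers the expected data
# layout -- parallel 'train_x'/'train_y' lists, one array pair per
# prescription group -- from the function body.
import numpy as np

rng = np.random.RandomState(0)
data = {'train_x': [rng.randn(80, 5) for _ in range(3)],
        'train_y': [rng.randn(80) for _ in range(3)]}
best = find_best_parameter_each_group(data)
for group, (reg_l2, n_neighbors) in enumerate(best):
    print('group %d: reg_l2=%g, K=%d' % (group, reg_l2, n_neighbors))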
def __init__(self, n_components=20, n_epochs=1, alpha=10,
             reduction_ratio='auto', dict_init=None, random_state=None,
             batch_size=20, method="cd", mask=None, smoothing_fwhm=4,
             standardize=True, detrend=True, low_pass=None, high_pass=None,
             t_r=None, target_affine=None, target_shape=None,
             mask_strategy='epi', mask_args=None, n_jobs=1, verbose=0,
             memory=Memory(cachedir=None), memory_level=0):
    BaseDecomposition.__init__(
        self, n_components=n_components, random_state=random_state,
        mask=mask, smoothing_fwhm=smoothing_fwhm, standardize=standardize,
        detrend=detrend, low_pass=low_pass, high_pass=high_pass, t_r=t_r,
        target_affine=target_affine, target_shape=target_shape,
        mask_strategy=mask_strategy, mask_args=mask_args, memory=memory,
        memory_level=memory_level, n_jobs=n_jobs, verbose=verbose)
    self.n_epochs = n_epochs
    self.batch_size = batch_size
    self.method = method
    self.alpha = alpha
    self.reduction_ratio = reduction_ratio
    self.dict_init = dict_init
def __init__(self, penalty="graph-net", l1_ratios=.5, alphas=None, n_alphas=10, mask=None, target_affine=None, target_shape=None, low_pass=None, high_pass=None, t_r=None, max_iter=1000, tol=1e-4, memory=Memory(None), memory_level=1, standardize=True, verbose=1, n_jobs=1, eps=1e-3, cv=8, fit_intercept=True, screening_percentile=20., debias=False): super(SpaceNetRegressor, self).__init__( penalty=penalty, is_classif=False, l1_ratios=l1_ratios, alphas=alphas, n_alphas=n_alphas, target_shape=target_shape, low_pass=low_pass, high_pass=high_pass, mask=mask, t_r=t_r, max_iter=max_iter, tol=tol, memory=memory, memory_level=memory_level, n_jobs=n_jobs, eps=eps, cv=cv, debias=debias, fit_intercept=fit_intercept, standardize=standardize, screening_percentile=screening_percentile, target_affine=target_affine, verbose=verbose)
def __init__(self, maps_img, mask_img=None, allow_overlap=True,
             smoothing_fwhm=None, standardize=False, detrend=False,
             low_pass=None, high_pass=None, t_r=None,
             resampling_target="data",
             memory=Memory(cachedir=None, verbose=0), memory_level=0,
             verbose=0):
    self.maps_img = maps_img
    self.mask_img = mask_img

    # Maps masker parameter
    self.allow_overlap = allow_overlap

    # Parameters for image.smooth
    self.smoothing_fwhm = smoothing_fwhm

    # Parameters for clean()
    self.standardize = standardize
    self.detrend = detrend
    self.low_pass = low_pass
    self.high_pass = high_pass
    self.t_r = t_r

    # Parameters for resampling
    self.resampling_target = resampling_target

    # Parameters for joblib
    self.memory = memory
    self.memory_level = memory_level
    self.verbose = verbose

    if resampling_target not in ("mask", "maps", "data", None):
        raise ValueError("invalid value for 'resampling_target'"
                         " parameter: " + str(resampling_target))

    if self.mask_img is None and resampling_target == "mask":
        raise ValueError(
            "resampling_target has been set to 'mask' but no mask "
            "has been provided.\nSet resampling_target to something else"
            " or provide a mask.")
def decompose_run(smoothing_fwhm, batch_size, learning_rate, verbose,
                  reduction, alpha, n_jobs, n_epochs, buffer_size, init,
                  _seed):
    n_components = init['n_components']
    dict_init = load_init()
    train_data, test_data, mask = load_data()
    memory = Memory(cachedir=get_cache_dirs()[0], verbose=2)
    cb = rfMRIDictionaryScorer(test_data)
    dict_fact = fMRIDictFact(smoothing_fwhm=smoothing_fwhm,
                             mask=mask,
                             memory=memory,
                             memory_level=2,
                             verbose=verbose,
                             n_epochs=n_epochs,
                             n_jobs=n_jobs,
                             random_state=_seed,
                             n_components=n_components,
                             dict_init=dict_init,
                             learning_rate=learning_rate,
                             batch_size=batch_size,
                             reduction=reduction,
                             alpha=alpha,
                             buffer_size=buffer_size,
                             callback=cb)
    dict_fact.fit(train_data)
    dict_fact.components_.to_filename('components.nii.gz')
    fig = plt.figure()
    display_maps(fig, dict_fact.components_)
    fig, ax = plt.subplots(1, 1)
    ax.plot(cb.time, cb.score, marker='o')
    plt.show()
def motion_correction_pypreprocess(in_file, out_path, force_mean_reference,
                                   extra_params=None):
    """
    An attempt at motion correction using the pypreprocess package.

    inputs:
        in_file: path to the input file, or the input file loaded as a
            nibabel image.
        out_path: path to the future output file
        force_mean_reference: if it evaluates True, adjust motion relative
            to the mean image; otherwise relative to the first volume.
        extra_params: dict of extra parameters for MRIMotionCorrection

    return: the motion corrected image
    """
    # Avoid a mutable default argument.
    if extra_params is None:
        extra_params = {}

    if force_mean_reference:
        # Calculate the mean volume and insert it at the front.
        print('motion correction referenced to mean!')
        in_file = math_img(
            'np.insert(img, 0, np.mean(img, axis=-1), axis=3)', img=in_file)
    else:
        print('motion correction referenced to the first volume.')

    # Instantiate the realigner.
    if 'MRIMotionCorrection' in extra_params:
        print('extra parameters are used for MRIMotionCorrection: %s'
              % extra_params['MRIMotionCorrection'])
        mrimc = MRIMotionCorrection(**extra_params['MRIMotionCorrection'])
    else:
        mrimc = MRIMotionCorrection()

    # Fit the realigner, through the disk cache if one is configured.
    if USE_CACHE:
        mem = Memory("func_preproc_cache")
        mrimc = mem.cache(mrimc.fit)(in_file)
    else:
        mrimc = mrimc.fit(in_file)

    # Write realigned files to disk.
    result = mrimc.transform(concat=True)['realigned_images'][0]
    if force_mean_reference:
        # Remove the first frame, which was the inserted mean.
        result = math_img('img[..., 1:]', img=result)

    if out_path:
        nib.save(result, out_path)
    return result
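# A hedged usage sketch for motion_correction_pypreprocess: the file names
# are placeholders, USE_CACHE must be defined in the enclosing module, and
# the MRIMotionCorrection keyword shown is illustrative -- check the
# pypreprocess signature before relying on it.
corrected = motion_correction_pypreprocess(
    in_file='sub01_bold.nii.gz',       # hypothetical 4D BOLD image
    out_path='sub01_bold_mc.nii.gz',   # where the realigned image is saved
    force_mean_reference=False,        # realign to the first volume
    extra_params={'MRIMotionCorrection': {'n_sessions': 1}})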
def fit(self, X, y=None, get_rhos=False):
    '''
    Sets up for divergence estimation "from" new data "to" X.
    Builds FLANN indices for each bag, and maybe gets within-bag distances.

    Parameters
    ----------
    X : list of arrays or :class:`skl_groups.features.Features`
        The bags to search "to".

    get_rhos : boolean, optional, default False
        Compute within-bag distances :attr:`rhos_`. These are only needed
        for some divergence functions or if do_sym is passed, and they'll
        be computed (and saved) during :meth:`transform` if they're not
        computed here.

        If you're using Jensen-Shannon divergence, a higher max_K may
        be needed once it sees the number of points in the transformed
        bags, so the computation here might be wasted.
    '''
    self.features_ = X = as_features(X, stack=True, bare=True)

    # if we're using a function that needs to pick its K vals itself,
    # then we need to set max_K here. when we transform(), might have to
    # re-do this :|
    Ks = self._get_Ks()
    _, _, _, max_K, save_all_Ks, _ = _choose_funcs(
        self.div_funcs, Ks, X.dim, X.n_pts, None, self.version)

    if max_K >= X.n_pts.min():
        msg = "asked for K = {}, but there's a bag with only {} points"
        raise ValueError(msg.format(max_K, X.n_pts.min()))

    memory = self.memory
    if isinstance(memory, string_types):
        memory = Memory(cachedir=memory, verbose=0)

    # (renamed from `id`, which shadowed the builtin)
    self.indices_ = indices = memory.cache(_build_indices)(
        X, self._flann_args())
    if get_rhos:
        self.rhos_ = _get_rhos(X, indices, Ks, max_K, save_all_Ks,
                               self.min_dist)
    elif hasattr(self, 'rhos_'):
        del self.rhos_

    return self
def __init__(self, n_clusters=2, affinity="euclidean", memory=Memory(cachedir=None, verbose=0), connectivity=None, n_components=None, compute_full_tree='auto', linkage='ward', pooling_func=np.mean): self.n_clusters = n_clusters self.memory = memory self.n_components = n_components self.connectivity = connectivity self.compute_full_tree = compute_full_tree self.linkage = linkage self.affinity = affinity self.pooling_func = pooling_func
def _niigz2nii(self):
    """
    Convert .nii.gz to .nii (crucial for SPM).
    """
    cache_dir = os.path.join(self.scratch, 'cache_dir')
    mem = Memory(cache_dir, verbose=100)
    self._sanitize_session_output_dirs()
    if None not in [self.func, self.n_sessions, self.session_output_dirs]:
        self.func = [mem.cache(do_niigz2nii)(
            self.func[sess], output_dir=self.session_output_dirs[sess])
            for sess in range(self.n_sessions)]
    if self.anat is not None:
        self.anat = mem.cache(do_niigz2nii)(
            self.anat, output_dir=self.anat_output_dir)
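# The mem.cache(...) pattern used above, shown in isolation (a sketch with
# a hypothetical stand-in for do_niigz2nii, not pypreprocess code): wrapping
# a function with Memory.cache memoizes its return value on disk, keyed by
# the arguments, so repeated conversions are replayed instead of recomputed.
from joblib import Memory

mem = Memory('cache_dir', verbose=0)

def gunzip_image(path, output_dir=None):
    # ... decompress `path` into output_dir and return the new filename ...
    return path.replace('.nii.gz', '.nii')

cached_gunzip = mem.cache(gunzip_image)
cached_gunzip('func.nii.gz', output_dir='out')  # computed and stored
cached_gunzip('func.nii.gz', output_dir='out')  # served from the cache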
def fit(self, niimgs, y=None):
    """Compute the mask corresponding to the data.

    Parameters
    ----------
    niimgs: list of filenames or NiImages
        Data on which the mask must be calculated. If this is a list,
        the affine is considered the same for all.
    """
    memory = self.memory
    if isinstance(memory, str):
        memory = Memory(cachedir=memory)

    # Load data (if filenames are given, load them)
    if self.verbose > 0:
        print("[%s.fit] Loading data" % self.__class__.__name__)
    niimgs = utils.check_niimgs(niimgs, accept_3d=True)

    # Compute the mask if not given by the user
    if self.mask is None:
        if self.verbose > 0:
            print("[%s.fit] Computing the mask" % self.__class__.__name__)
        mask = memory.cache(masking.compute_epi_mask)(
            niimgs.get_data(),
            connected=self.mask_connected,
            opening=self.mask_opening,
            lower_cutoff=self.mask_lower_cutoff,
            upper_cutoff=self.mask_upper_cutoff,
            verbose=(self.verbose - 1))
        self.mask_ = Nifti1Image(mask.astype(int), niimgs.get_affine())
    else:
        self.mask_ = utils.check_niimg(self.mask)

    # If resampling is requested, resample the mask as well.
    # Resampling allows the user to change the affine, the shape, or both.
    if self.verbose > 0:
        print("[%s.fit] Resampling mask" % self.__class__.__name__)
    self.mask_ = memory.cache(resampling.resample_img)(
        self.mask_,
        target_affine=self.target_affine,
        target_shape=self.target_shape,
        copy=(self.target_affine is not None
              and self.target_shape is not None))
    return self
def test__safe_cache_dir_creation():
    # Test the _safe_cache function that is supposed to flush the
    # cache if the nibabel version changes
    try:
        temp_dir = tempfile.mkdtemp()
        mem = Memory(cachedir=temp_dir)
        version_file = os.path.join(temp_dir, 'joblib',
                                    'module_versions.json')
        assert_false(os.path.exists(version_file))
        # First, test that a version file gets created
        cache_mixin._safe_cache(mem, f)
        assert_true(os.path.exists(version_file))
        # Test that it does not get recreated during the same session
        os.unlink(version_file)
        cache_mixin._safe_cache(mem, f)
        assert_false(os.path.exists(version_file))
    finally:
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)
def __init__(self, data, n_clusters=2, affinity='euclidean',
             memory=Memory(cachedir=None), connectivity=None,
             compute_full_tree='auto', linkage='ward',
             pooling_func=np.mean):
    super(AgglomerativeClustering, self).__init__()
    self.data = data
    self.n_clusters = n_clusters
    self.affinity = affinity
    self.memory = memory
    self.connectivity = connectivity
    self.compute_full_tree = compute_full_tree
    self.linkage = linkage
    self.pooling_func = pooling_func
def __init__(self, maps_img, mask_img=None, min_region_size=1350,
             threshold=1., thresholding_strategy='ratio_n_voxels',
             extractor='local_regions', smoothing_fwhm=6,
             standardize=False, detrend=False, low_pass=None,
             high_pass=None, t_r=None, memory=Memory(cachedir=None),
             memory_level=0, verbose=0):
    super(RegionExtractor, self).__init__(
        maps_img=maps_img, mask_img=mask_img,
        smoothing_fwhm=smoothing_fwhm, standardize=standardize,
        detrend=detrend, low_pass=low_pass, high_pass=high_pass,
        t_r=t_r, memory=memory, memory_level=memory_level,
        verbose=verbose)
    self.maps_img = maps_img
    self.min_region_size = min_region_size
    self.thresholding_strategy = thresholding_strategy
    self.threshold = threshold
    self.extractor = extractor
    self.smoothing_fwhm = smoothing_fwhm
def __init__(self, mask=None, n_components=20, smoothing_fwhm=6,
             do_cca=True, threshold='auto', n_init=10, random_state=None,
             standardize=True, detrend=True, low_pass=None, high_pass=None,
             t_r=None, target_affine=None, target_shape=None,
             mask_strategy='epi', mask_args=None,
             memory=Memory(cachedir=None), memory_level=0, n_jobs=1,
             verbose=0):
    super(CanICA, self).__init__(
        n_components=n_components,
        do_cca=do_cca,
        random_state=random_state,
        # feature_compression=feature_compression,
        mask=mask, smoothing_fwhm=smoothing_fwhm,
        standardize=standardize, detrend=detrend,
        low_pass=low_pass, high_pass=high_pass, t_r=t_r,
        target_affine=target_affine, target_shape=target_shape,
        mask_strategy=mask_strategy, mask_args=mask_args,
        memory=memory, memory_level=memory_level,
        n_jobs=n_jobs, verbose=verbose)
    self.threshold = threshold
    self.n_init = n_init
def __init__(self, n_components=20, alpha=0.1, dict_init=None,
             transform_batch_size=None, mask=None, smoothing_fwhm=None,
             standardize=True, detrend=True, low_pass=None, high_pass=None,
             t_r=None, target_affine=None, target_shape=None,
             mask_strategy='background', mask_args=None,
             memory=Memory(cachedir=None), memory_level=2, n_jobs=1,
             verbose=0):
    BaseNilearnEstimator.__init__(
        self, mask=mask, smoothing_fwhm=smoothing_fwhm,
        standardize=standardize, detrend=detrend, low_pass=low_pass,
        high_pass=high_pass, t_r=t_r, target_affine=target_affine,
        target_shape=target_shape, mask_strategy=mask_strategy,
        mask_args=mask_args, memory=memory, memory_level=memory_level,
        n_jobs=n_jobs, verbose=verbose)
    self.n_components = n_components
    self.transform_batch_size = transform_batch_size
    self.dict_init = dict_init
    self.alpha = alpha
def __init__(self, min_cluster_size=5, min_samples=None,
             metric='euclidean', alpha=1.0, p=None, algorithm='best',
             leaf_size=40, memory=Memory(cachedir=None, verbose=0),
             approx_min_span_tree=True, gen_min_span_tree=False,
             core_dist_n_jobs=4, cluster_selection_method='eom',
             allow_single_cluster=False, prediction_data=False,
             match_reference_implementation=False, **kwargs):
    self.min_cluster_size = min_cluster_size
    self.min_samples = min_samples
    self.alpha = alpha
    self.metric = metric
    self.p = p
    self.algorithm = algorithm
    self.leaf_size = leaf_size
    self.memory = memory
    self.approx_min_span_tree = approx_min_span_tree
    self.gen_min_span_tree = gen_min_span_tree
    self.core_dist_n_jobs = core_dist_n_jobs
    self.cluster_selection_method = cluster_selection_method
    self.allow_single_cluster = allow_single_cluster
    self.match_reference_implementation = match_reference_implementation
    self.prediction_data = prediction_data
    self._metric_kwargs = kwargs

    self._condensed_tree = None
    self._single_linkage_tree = None
    self._min_spanning_tree = None
    self._raw_data = None
    self._outlier_scores = None
    self._prediction_data = None
    self._relative_validity = None
def transform(self, X):
    r'''
    Computes the divergences from X to :attr:`features_`.

    Parameters
    ----------
    X : list of bag feature arrays or :class:`skl_groups.features.Features`
        The bags to search "from".

    Returns
    -------
    divs : array of shape ``[len(div_funcs), len(Ks), len(X), len(features_)] + ([2] if do_sym else [])``
        The divergences from X to :attr:`features_`.
        ``divs[d, k, i, j]`` is the ``div_funcs[d]`` divergence
        from ``X[i]`` to ``features_[j]`` using a K of ``Ks[k]``.
        If ``do_sym``, ``divs[d, k, i, j, 0]`` is
        :math:`D_{d,k}( X_i \| \texttt{features_}_j)` and
        ``divs[d, k, i, j, 1]`` is
        :math:`D_{d,k}(\texttt{features_}_j \| X_i)`.
    '''
    X = as_features(X, stack=True, bare=True)
    Y = self.features_
    Ks = np.asarray(self.Ks)

    if X.dim != Y.dim:
        msg = "incompatible dimensions: fit with {}, transform with {}"
        raise ValueError(msg.format(Y.dim, X.dim))

    memory = self.memory
    if isinstance(memory, string_types):
        memory = Memory(cachedir=memory, verbose=0)

    # ignore Y_indices to avoid slow pickling of them
    # NOTE: if the indices are approximate, then might not get the same
    # results!
    est = memory.cache(_est_divs, ignore=['n_jobs', 'Y_indices', 'Y_rhos'])
    output, self.rhos_ = est(
        X, Y, self.indices_, getattr(self, 'rhos_', None),
        self.div_funcs, Ks, self.do_sym, self.clamp, self.version,
        self.min_dist, self._flann_args(), self._n_jobs)
    return output
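# An end-to-end sketch of the fit/transform pair above, assuming the
# enclosing class is skl_groups' KNNDivergenceEstimator with its defaults
# div_funcs=('kl',) and Ks=(3,); the bag sizes are arbitrary.
import numpy as np
from skl_groups.divergences import KNNDivergenceEstimator

rng = np.random.RandomState(0)
fit_bags = [rng.randn(50, 4) for _ in range(5)]   # bags to search "to"
new_bags = [rng.randn(40, 4) for _ in range(3)]   # bags to search "from"

est = KNNDivergenceEstimator(div_funcs=('kl',), Ks=(3,))
est.fit(fit_bags)
divs = est.transform(new_bags)
print(divs.shape)   # (1, 1, 3, 5): [div_funcs, Ks, len(X), len(features_)]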
def __init__(self, dictionary, alpha=0.1, transform_batch_size=None,
             mask=None, smoothing_fwhm=None, standardize=False,
             detrend=False, low_pass=None, high_pass=None, t_r=None,
             target_affine=None, target_shape=None,
             mask_strategy='background', mask_args=None,
             memory=Memory(cachedir=None), memory_level=2, n_jobs=1,
             verbose=0):
    self.dictionary = dictionary
    fMRICoderMixin.__init__(
        self, n_components=None, alpha=alpha,
        dict_init=self.dictionary, mask=mask,
        smoothing_fwhm=smoothing_fwhm,
        standardize=standardize, detrend=detrend,
        low_pass=low_pass, high_pass=high_pass,
        transform_batch_size=transform_batch_size,
        t_r=t_r, target_affine=target_affine,
        target_shape=target_shape,
        mask_strategy=mask_strategy, mask_args=mask_args,
        memory=memory, memory_level=memory_level,
        n_jobs=n_jobs, verbose=verbose)
def __init__(self, seeds, radius=None, mask_img=None,
             xform_fn=partial(np.mean, axis=0), loop_axis=1,
             smoothing_fwhm=None, standardize=False, detrend=False,
             low_pass=None, high_pass=None, t_r=None,
             memory=Memory(cachedir=None, verbose=0), memory_level=1,
             verbose=0):
    if is_img(seeds):
        self.seeds_img = check_niimg(seeds, ensure_3d=True)
        self.seeds = None
    else:
        self.seeds = seeds
        self.seeds_img = None  # computed via fit()
    self.mask_img = mask_img
    self.radius = radius
    self.xform_fn = xform_fn
    self.loop_axis = loop_axis

    # Parameters for _smooth_array
    self.smoothing_fwhm = smoothing_fwhm

    # Parameters for clean()
    self.standardize = standardize
    self.detrend = detrend
    self.low_pass = low_pass
    self.high_pass = high_pass
    self.t_r = t_r

    # Parameters for joblib
    self.memory = memory
    self.memory_level = memory_level
    self.verbose = verbose
def _do_subject_slice_timing(subject_data, ref_slice=0,
                             slice_order="ascending", interleaved=False,
                             caching=True, write_output_images=2,
                             func_prefix=None, func_basenames=None,
                             ext=None):
    if func_prefix is None:
        func_prefix = PREPROC_OUTPUT_IMAGE_PREFICES['STC']
    if func_basenames is None:
        func_basenames = [get_basenames(func) for func in subject_data.func]

    # prepare for smart caching
    if caching:
        mem = Memory(cachedir=os.path.join(subject_data.output_dir,
                                           'cache_dir'), verbose=100)
    runner = lambda handle: mem.cache(handle) if caching else handle

    stc_output = []
    original_bold = subject_data.func
    for sess_func, sess_id in zip(subject_data.func,
                                  range(subject_data.n_sessions)):
        fmristc = runner(fMRISTC(slice_order=slice_order,
                                 ref_slice=ref_slice,
                                 interleaved=interleaved,
                                 verbose=True).fit)(raw_data=sess_func)
        stc_output.append(runner(fmristc.transform)(
            sess_func,
            output_dir=subject_data.tmp_output_dir if (
                write_output_images > 0) else None,
            basenames=func_basenames[sess_id],
            prefix=func_prefix, ext=ext))
    subject_data.func = stc_output
    del original_bold, fmristc

    if write_output_images > 1:
        subject_data.hardlink_output_files()
    return subject_data
def __init__(self, labels_img, background_label=0, mask_img=None,
             smoothing_fwhm=None, standardize=False, detrend=False,
             low_pass=None, high_pass=None, t_r=None, dtype=None,
             resampling_target="data",
             memory=Memory(cachedir=None, verbose=0), memory_level=1,
             verbose=0):
    self.labels_img = labels_img
    self.background_label = background_label
    self.mask_img = mask_img

    # Parameters for _smooth_array
    self.smoothing_fwhm = smoothing_fwhm

    # Parameters for clean()
    self.standardize = standardize
    self.detrend = detrend
    self.low_pass = low_pass
    self.high_pass = high_pass
    self.t_r = t_r
    self.dtype = dtype

    # Parameters for resampling
    self.resampling_target = resampling_target

    # Parameters for joblib
    self.memory = memory
    self.memory_level = memory_level
    self.verbose = verbose

    if resampling_target not in ("labels", "data", None):
        raise ValueError("invalid value for 'resampling_target' "
                         "parameter: " + str(resampling_target))
def __init__(self, div_funcs=('kl',), Ks=(3,), do_sym=False, n_jobs=1,
             clamp=True, min_dist=1e-3, flann_algorithm='auto',
             flann_args=None, version='best',
             memory=Memory(cachedir=None, verbose=0)):
    self.div_funcs = div_funcs
    self.Ks = Ks
    self.do_sym = do_sym
    self.n_jobs = n_jobs
    self.clamp = clamp
    self.min_dist = min_dist
    self.flann_algorithm = flann_algorithm
    self.flann_args = flann_args
    self.version = version
    self.memory = memory
def build_pipeline(base_model, pipeline, param_grid, reduce_dim_param_grid,
                   grid_search_parameters, cache_dirs):
    # Namespace each classifier grid for the 'classify' pipeline step;
    # after this comprehension, param_grid is a list of dicts.
    param_grid = [{'classify__' + key: value
                   for (key, value) in inner_param_grid.items()}
                  for inner_param_grid in param_grid]
    if isinstance(param_grid, list):
        for i in range(len(param_grid)):
            for key, value in reduce_dim_param_grid.items():
                param_grid[i][key] = value
    if isinstance(param_grid, dict):
        for key, value in reduce_dim_param_grid.items():
            param_grid[key] = value

    # Cache pipeline steps in a fresh temporary directory; the caller is
    # responsible for removing the directories collected in cache_dirs.
    cachedir = mkdtemp()
    cache_dirs.append(cachedir)
    memory = Memory(cachedir=cachedir, verbose=0)
    pipe = Pipeline(pipeline, memory=memory)
    gridsearch = GridSearchCV(pipe, param_grid=param_grid,
                              **grid_search_parameters)
    return gridsearch
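# A hedged usage sketch for build_pipeline: the steps, grids, and scoring
# choice are illustrative. Note that a step named 'classify' must exist,
# since the classifier grid keys are prefixed with 'classify__'.
from sklearn.decomposition import PCA
from sklearn.svm import SVC

cache_dirs = []
search = build_pipeline(
    base_model=None,   # unused by the function body
    pipeline=[('reduce_dim', PCA()), ('classify', SVC())],
    param_grid=[{'C': [1, 10]}],
    reduce_dim_param_grid={'reduce_dim__n_components': [2, 5]},
    grid_search_parameters=dict(cv=3, scoring='accuracy'),
    cache_dirs=cache_dirs)
# search.fit(X, y); afterwards, rmtree() each directory in cache_dirs.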
def __init__(self, mask_img=None, smoothing_fwhm=None, standardize=False,
             detrend=False, low_pass=None, high_pass=None, t_r=None,
             target_affine=None, target_shape=None,
             mask_strategy='background', mask_args=None,
             memory=Memory(cachedir=None), memory_level=0, n_jobs=1,
             verbose=0):
    # Mask is provided or computed
    MultiNiftiMasker.__init__(
        self, mask_img=mask_img, n_jobs=n_jobs,
        smoothing_fwhm=smoothing_fwhm, standardize=standardize,
        detrend=detrend, low_pass=low_pass, high_pass=high_pass, t_r=t_r,
        target_affine=target_affine, target_shape=target_shape,
        mask_strategy=mask_strategy, mask_args=mask_args, memory=memory,
        memory_level=memory_level, verbose=verbose)
def __init__(self, n_components=20, smoothing_fwhm=None, mask=None,
             do_cca=True, standardize=True, target_affine=None,
             target_shape=None, low_pass=None, high_pass=None, t_r=None,
             memory=Memory(cachedir=None), memory_level=0, n_jobs=1,
             verbose=0):
    self.mask = mask
    self.memory = memory
    self.memory_level = memory_level
    self.n_jobs = n_jobs
    self.verbose = verbose
    self.low_pass = low_pass
    self.high_pass = high_pass
    self.t_r = t_r
    self.do_cca = do_cca
    self.n_components = n_components
    self.smoothing_fwhm = smoothing_fwhm
    self.target_affine = target_affine
    self.target_shape = target_shape
    self.standardize = standardize
def __init__(self, mask_img=None, sessions=None, smoothing_fwhm=None,
             standardize=False, detrend=False, low_pass=None,
             high_pass=None, t_r=None, target_affine=None,
             target_shape=None, mask_strategy='background',
             mask_args=None, sample_mask=None, dtype=None, memory_level=1,
             memory=Memory(cachedir=None), verbose=0):
    # Mask is provided or computed
    self.mask_img = mask_img
    self.sessions = sessions
    self.smoothing_fwhm = smoothing_fwhm
    self.standardize = standardize
    self.detrend = detrend
    self.low_pass = low_pass
    self.high_pass = high_pass
    self.t_r = t_r
    self.target_affine = target_affine
    self.target_shape = target_shape
    self.mask_strategy = mask_strategy
    self.mask_args = mask_args
    self.sample_mask = sample_mask
    self.dtype = dtype
    self.memory = memory
    self.memory_level = memory_level
    self.verbose = verbose
    self._shelving = False
def __init__(self, projection=None, scaler=None, cover=None,
             clusterer=None, remove_duplicate_nodes=False,
             memory='dyneusr_cache', verbose=1):
    """ Wraps KeplerMapper

    Usage
    -----
        mapper = KMapperWrapper(projection=PCA(3), cover=dict(r=10, g=2))
        l = mapper.fit(X)
        g = mapper.map(l, X)

        # or
        g = mapper.fit_map(X)
    """
    try:
        from kmapper import KeplerMapper
        from kmapper.cover import Cover
    except ImportError as e:
        print("[warning]", e)
        raise  # KeplerMapper is required below; fail loudly

    # init mapper
    self.mapper = KeplerMapper()
    self.verbose = verbose

    # [1] fit params
    self.projection = projection if projection is not None else PCA(2)
    self.scaler = scaler  # or MinMaxScaler()

    # [2] map params
    self.clusterer = clusterer or DBSCAN(eps=1, min_samples=2)
    self.cover = cover or Cover(10, 0.5)
    self.remove_duplicate_nodes = remove_duplicate_nodes

    # setup memory
    self.memory = Memory(memory, verbose=verbose)
def __init__(self, n_components=20, mask=None, smoothing_fwhm=None,
             do_cca=True, random_state=None, standardize=False,
             detrend=False, low_pass=None, high_pass=None, t_r=None,
             target_affine=None, target_shape=None, mask_strategy='epi',
             mask_args=None, memory=Memory(cachedir=None), memory_level=0,
             n_jobs=1, verbose=0):
    self.n_components = n_components
    self.do_cca = do_cca
    BaseDecomposition.__init__(
        self, n_components=n_components, random_state=random_state,
        mask=mask, smoothing_fwhm=smoothing_fwhm, standardize=standardize,
        detrend=detrend, low_pass=low_pass, high_pass=high_pass, t_r=t_r,
        target_affine=target_affine, target_shape=target_shape,
        mask_strategy=mask_strategy, mask_args=mask_args, memory=memory,
        memory_level=memory_level, n_jobs=n_jobs, verbose=verbose)
def __init__(self, method, n_parcels=50, random_state=0, mask=None,
             smoothing_fwhm=4., standardize=False, detrend=False,
             low_pass=None, high_pass=None, t_r=None, target_affine=None,
             target_shape=None, mask_strategy='epi', mask_args=None,
             memory=Memory(cachedir=None), memory_level=0, n_jobs=1,
             verbose=1):
    self.method = method
    self.n_parcels = n_parcels

    MultiPCA.__init__(
        self, n_components=200, random_state=random_state, mask=mask,
        memory=memory, smoothing_fwhm=smoothing_fwhm,
        standardize=standardize, detrend=detrend, low_pass=low_pass,
        high_pass=high_pass, t_r=t_r, target_affine=target_affine,
        target_shape=target_shape, mask_strategy=mask_strategy,
        mask_args=mask_args, memory_level=memory_level, n_jobs=n_jobs,
        verbose=verbose)
def _safe_filter_and_mask(niimgs, mask_img_, parameters,
                          ref_memory_level=0, memory=Memory(cachedir=None),
                          verbose=0, confounds=None, reference_affine=None,
                          copy=True):
    niimgs = _utils.check_niimgs(niimgs, accept_3d=True)
    # If there is a reference affine, we may have to force resampling
    target_affine = parameters['target_affine']
    if (target_affine is None and reference_affine is not None
            and reference_affine.shape == niimgs.get_affine().shape
            and not np.allclose(niimgs.get_affine(), reference_affine)):
        warnings.warn('Affine is different across subjects.'
                      ' Realignment on first subject affine forced')
        parameters = parameters.copy()
        parameters['target_affine'] = reference_affine
    return filter_and_mask(niimgs, mask_img_, parameters,
                           ref_memory_level, memory, verbose, confounds,
                           copy)