Code example #1
def _do_outer_cv(searcher, X, y, outer_cv, scoring, error_score="raise", outfile=None):
    """Do outer cross-validation for nested CV

    Parameters
    ----------
    searcher : object
        SearchCV object
    X : numpy array
        Containing features
    y : numpy array
        Target values or labels
    outer_cv : int or CV splitter
        Control the cv splitting
    scoring : object
        Scorer
    error_score : str, float or numpy float
        Whether to raise the fit error or return a value
    outfile : str
        File path to store the results
    """
    if error_score == "raise":
        rval = cross_validate(
            searcher,
            X,
            y,
            scoring=scoring,
            cv=outer_cv,
            n_jobs=N_JOBS,
            verbose=0,
            error_score=error_score,
        )
    else:
        warnings.simplefilter("always", FitFailedWarning)
        with warnings.catch_warnings(record=True) as w:
            try:
                rval = cross_validate(
                    searcher,
                    X,
                    y,
                    scoring=scoring,
                    cv=outer_cv,
                    n_jobs=N_JOBS,
                    verbose=0,
                    error_score=error_score,
                )
            except ValueError:
                pass
            for warning in w:
                print(repr(warning.message))

    keys = list(rval.keys())
    for k in keys:
        if k.startswith("test"):
            rval["mean_" + k] = np.mean(rval[k])
            rval["std_" + k] = np.std(rval[k])
        if k.endswith("time"):
            rval.pop(k)
    rval = pd.DataFrame(rval)
    rval = rval[sorted(rval.columns)]
    rval.to_csv(path_or_buf=outfile, sep="\t", header=True, index=False)
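
For context, here is a minimal, self-contained sketch of the nested-CV pattern this helper wraps, using only scikit-learn (the dataset, estimator, and parameter grid are illustrative, and the module-level N_JOBS is replaced by a literal):

import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV, KFold, cross_validate
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)

# Inner CV tunes hyperparameters; outer CV estimates performance of the tuned model.
searcher = GridSearchCV(SVC(), {"C": [0.1, 1.0, 10.0]}, cv=3)
outer_cv = KFold(n_splits=5, shuffle=True, random_state=0)

rval = cross_validate(searcher, X, y, scoring="accuracy", cv=outer_cv,
                      n_jobs=1, verbose=0, error_score="raise")

# Same aggregation as _do_outer_cv: add mean/std per test metric, drop timings.
for k in list(rval):
    if k.startswith("test"):
        rval["mean_" + k] = np.mean(rval[k])
        rval["std_" + k] = np.std(rval[k])
    if k.endswith("time"):
        rval.pop(k)
print(pd.DataFrame(rval))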
Code example #2
File: roi_decoding.py  Project: robbisg/mvpa_itab_wu
    def _fit(self, ds, cv_attr=None):
        """General method to fit data"""
                   
        self.scoring, _ = _check_multimetric_scoring(self.estimator, scoring=self.scoring)
        
        X, y = get_ds_data(ds)
        y = LabelEncoder().fit_transform(y)
        indices = self._get_permutation_indices(len(y))
                
        values = []
        
        groups = None
        if cv_attr is not None:
            groups = LabelEncoder().fit_transform(ds.sa[cv_attr].value)
        
        
        for idx in indices:
            
            y_ = y[idx]

            scores = cross_validate(self.estimator, X, y_, groups=groups,
                                    scoring=self.scoring, cv=self.cv,
                                    n_jobs=self.n_jobs, verbose=self.verbose,
                                    return_estimator=True, return_splits=True)
            
            values.append(scores)
        
        return values
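
For comparison, a minimal sketch of the same idea with plain scikit-learn: encode the labels and a grouping attribute, permute the labels a few times, and score each permutation with cross_validate. The data and estimator below are illustrative; note that return_splits in the snippet above is not a standard scikit-learn argument and presumably comes from a patched cross_validate.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GroupKFold, cross_validate
from sklearn.preprocessing import LabelEncoder

X, y = make_classification(n_samples=60, n_features=10, random_state=0)
groups = LabelEncoder().fit_transform(np.repeat(np.arange(6), 10))

rng = np.random.RandomState(0)
values = []
for _ in range(3):  # a few label permutations
    y_ = y[rng.permutation(len(y))]
    scores = cross_validate(LogisticRegression(max_iter=1000), X, y_,
                            groups=groups, scoring="accuracy",
                            cv=GroupKFold(n_splits=3), return_estimator=True)
    values.append(scores)
print([v["test_score"].mean() for v in values])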
Code example #3
def task3():
    df = pd.read_csv('people.csv')

    # This should go in the clean data function but I wanted to show it explicitly
    df['age'] = pd.to_numeric(df['age'], errors='coerce')
    df['age'] = df['age'].fillna(round(df['age'].mean()))
    df['age'] = df['age'].astype(int)

    df = df.dropna()

    clean_data(df)
    encode_data(df)

    older_females = df[(df['Gender'] == 'Female') &
                       (df['age'] > 30)]  # Dataset for females older than 30

    X = older_females[['education', 'age', 'job']]
    y = older_females[['workType']].values.ravel()

    models = []

    models.append(('Decision Tree Classifier', DecisionTreeClassifier()))
    models.append(('Naive Bayes', GaussianNB()))
    models.append(('Random Forest Classifier', RandomForestClassifier()))

    results = {}

    for name, model in models:
        cv_results = cross_validate(
            model, X, y, cv=5, scoring='accuracy',
            return_train_score=True)  # Setting Cross Validation to 5
        results[name] = cv_results

    for name in results:
        print(name)
        print('Training:', results[name]['train_score'].mean())
        print('Test: ', results[name]['test_score'].mean())

    labels = [
        'Decision Tree Classifier', 'Naive Bayes', 'Random Forest Classifier'
    ]
    train_means = [results[name]['train_score'].mean() for name in results]
    test_means = [results[name]['test_score'].mean() for name in results]

    x = np.arange(len(labels))
    width = 0.35

    fig, ax = plt.subplots()
    ax.bar(x - width / 2, train_means, width, label="Training Accuracy")
    ax.bar(x + width / 2, test_means, width, label="Testing Accuracy")

    ax.set_xlabel('Classifier')
    ax.set_ylabel('Average Accuracy')
    ax.set_title('Accuracy Scores')
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend()

    plt.show()
Code example #4
def fit_and_save(estimator,
                 X,
                 y=None,
                 groups=None,
                 scoring=None,
                 cv=None,
                 n_jobs=1,
                 verbose=0,
                 fit_params=None,
                 pre_dispatch='2*n_jobs',
                 return_train_score=True,
                 parameters=dict(),
                 uuid='',
                 url='http://127.0.0.1:8000'):

    import json, requests, numpy
    from sklearn.model_selection._validation import cross_validate

    X, y, groups = indexable(X, y, groups)

    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorers, _ = _check_multimetric_scoring(estimator, scoring=scoring)

    _base_scores = [0. for _ in range(cv.get_n_splits(X, y, groups))]

    cv_score = {}
    cv_score.update(
        {'train_%s' % s: numpy.array(_base_scores)
         for s in scorers})
    cv_score.update(
        {'test_%s' % s: numpy.array(_base_scores)
         for s in scorers})
    cv_score.update({'fit_time': _base_scores, 'score_time': _base_scores})

    try:
        cv_score = cross_validate(estimator, X, y, groups=groups,
                                  scoring=scorers, cv=cv, n_jobs=n_jobs,
                                  verbose=verbose, fit_params=fit_params,
                                  pre_dispatch=pre_dispatch,
                                  return_train_score=return_train_score)
        error = None
    except Exception as e:
        error = '{}: {}'.format(type(e).__name__, str(e))

    try:
        for k, v in cv_score.items():
            if isinstance(v, numpy.ndarray):
                cv_score[k] = v.tolist()
        response = requests.post('{url}/grids/{uuid}/results'.format(
            url=url, uuid=uuid),
                                 data={
                                     'gridsearch': uuid,
                                     'params': json.dumps(parameters),
                                     'errors': error,
                                     'cv_data': json.dumps(cv_score)
                                 })

    except requests.exceptions.ConnectionError:
        return None
    return response
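
The call above relies on the old positional signature of cross_validate; in recent scikit-learn releases most of these parameters are keyword-only. A hedged sketch of the equivalent keyword-argument form on toy data (the scorer names are illustrative):

from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_validate

X, y = load_breast_cancer(return_X_y=True)
cv_score = cross_validate(
    LogisticRegression(max_iter=2000), X, y,
    groups=None,
    scoring={"acc": "accuracy", "f1": "f1"},
    cv=5,
    n_jobs=1,
    verbose=0,
    pre_dispatch="2*n_jobs",
    return_train_score=True,
)
print(sorted(cv_score))  # fit_time, score_time, plus test_/train_ keys per scorer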
Code example #5
def main():
    # Checks
    checkUsage()

    # Parameters
    k = 10
    nbTrees = 100

    dataSetPath = sys.argv[1]
    data = pd.read_json(dataSetPath)

    # Shuffle Data
    data = shuffle(data)

    # Random Forest Model
    classifierKFold = RandomForestClassifier(n_estimators=nbTrees,
                                             random_state=0,
                                             verbose=0)

    X = data.iloc[:, [1, 3, 6, 7, 8, 9, 10, 11, 12]].values
    y = data.iloc[:, 4].values

    # Cross validation, K = 10, using stratified folds

    scoring = {
        'precision': make_scorer(precision),
        'recall': make_scorer(recall),
        'f1': make_scorer(f1),
        'auc': make_scorer(auc),
        'fpr': make_scorer(fpr),
        'tnr': make_scorer(tnr),
        'mcc': make_scorer(mcc)
    }

    scores = cross_validate(classifierKFold,
                            X,
                            y,
                            cv=k,
                            scoring=scoring,
                            verbose=4)

    print("\nMetrics with K-fold:", k, ", nbTree:", nbTrees)

    displayScores(scores['test_precision'], "Precision")
    displayScores(scores['test_recall'], "Recall")
    displayScores(scores['test_f1'], "F1")
    displayScores(scores['test_tnr'], "TNR")
    displayScores(scores['test_fpr'], "FPR")
    displayScores(scores['test_auc'], "AUC")
    displayScores(scores['test_mcc'], "MCC")
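
The scoring dict above depends on project-specific metric functions (precision, recall, auc, and so on). A self-contained sketch of the same multi-metric pattern, with standard scikit-learn metrics standing in for those helpers:

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (f1_score, make_scorer, matthews_corrcoef,
                             precision_score, recall_score)
from sklearn.model_selection import cross_validate

X, y = make_classification(n_samples=200, random_state=0)

scoring = {
    'precision': make_scorer(precision_score),
    'recall': make_scorer(recall_score),
    'f1': make_scorer(f1_score),
    'mcc': make_scorer(matthews_corrcoef),
}
clf = RandomForestClassifier(n_estimators=100, random_state=0)
scores = cross_validate(clf, X, y, cv=10, scoring=scoring)
for name in scoring:
    fold_scores = scores['test_%s' % name]
    print('%s: %.3f (+/- %.3f)' % (name, fold_scores.mean(), fold_scores.std()))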
Code example #6
def main(inputs,
         infile_estimator,
         infile1,
         infile2,
         outfile_result,
         outfile_object=None,
         outfile_weights=None,
         groups=None,
         ref_seq=None,
         intervals=None,
         targets=None,
         fasta_path=None):
    """
    Parameters
    ----------
    inputs : str
        File path to galaxy tool parameter

    infile_estimator : str
        File path to estimator

    infile1 : str
        File path to dataset containing features

    infile2 : str
        File path to dataset containing target values

    outfile_result : str
        File path to save the results, either cv_results or test result

    outfile_object : str, optional
        File path to save searchCV object

    outfile_weights : str, optional
        File path to save model weights

    groups : str
        File path to dataset containing groups labels

    ref_seq : str
        File path to dataset containing genome sequence file

    intervals : str
        File path to dataset containing interval file

    targets : str
        File path to dataset compressed target bed file

    fasta_path : str
        File path to dataset containing fasta file
    """
    warnings.simplefilter('ignore')

    # store read dataframe object
    loaded_df = {}

    with open(inputs, 'r') as param_handler:
        params = json.load(param_handler)

    # Override the refit parameter
    params['search_schemes']['options']['refit'] = True \
        if params['save'] != 'nope' else False

    with open(infile_estimator, 'rb') as estimator_handler:
        estimator = load_model(estimator_handler)

    optimizer = params['search_schemes']['selected_search_scheme']
    optimizer = getattr(model_selection, optimizer)

    # handle gridsearchcv options
    options = params['search_schemes']['options']

    if groups:
        header = 'infer' if (
            options['cv_selector']['groups_selector']['header_g']) else None
        column_option = (
            options['cv_selector']['groups_selector']
            ['column_selector_options_g']['selected_column_selector_option_g'])
        if column_option in [
                'by_index_number', 'all_but_by_index_number', 'by_header_name',
                'all_but_by_header_name'
        ]:
            c = (options['cv_selector']['groups_selector']
                 ['column_selector_options_g']['col_g'])
        else:
            c = None

        df_key = groups + repr(header)

        groups = pd.read_csv(groups, sep='\t', header=header, parse_dates=True)
        loaded_df[df_key] = groups

        groups = read_columns(groups,
                              c=c,
                              c_option=column_option,
                              sep='\t',
                              header=header,
                              parse_dates=True)
        groups = groups.ravel()
        options['cv_selector']['groups_selector'] = groups

    splitter, groups = get_cv(options.pop('cv_selector'))
    options['cv'] = splitter
    primary_scoring = options['scoring']['primary_scoring']
    options['scoring'] = get_scoring(options['scoring'])
    if options['error_score']:
        options['error_score'] = 'raise'
    else:
        options['error_score'] = np.NaN
    if options['refit'] and isinstance(options['scoring'], dict):
        options['refit'] = primary_scoring
    if 'pre_dispatch' in options and options['pre_dispatch'] == '':
        options['pre_dispatch'] = None

    params_builder = params['search_schemes']['search_params_builder']
    param_grid = _eval_search_params(params_builder)

    estimator = clean_params(estimator)

    # save the SearchCV object without fit
    if params['save'] == 'save_no_fit':
        searcher = optimizer(estimator, param_grid, **options)
        print(searcher)
        with open(outfile_object, 'wb') as output_handler:
            pickle.dump(searcher, output_handler, pickle.HIGHEST_PROTOCOL)
        return 0

    # read inputs and load new attributes, like paths
    estimator, X, y = _handle_X_y(estimator,
                                  params,
                                  infile1,
                                  infile2,
                                  loaded_df=loaded_df,
                                  ref_seq=ref_seq,
                                  intervals=intervals,
                                  targets=targets,
                                  fasta_path=fasta_path)

    # caching iraps_core fits can increase search speed significantly
    memory = joblib.Memory(location=CACHE_DIR, verbose=0)
    main_est = get_main_estimator(estimator)
    if main_est.__class__.__name__ == 'IRAPSClassifier':
        main_est.set_params(memory=memory)

    searcher = optimizer(estimator, param_grid, **options)

    split_mode = params['outer_split'].pop('split_mode')

    if split_mode == 'nested_cv':
        # make sure refit is chosen
        # this could be True for sklearn models, but not the case for
        # deep learning models
        if not options['refit'] and \
                not all(hasattr(estimator, attr)
                        for attr in ('config', 'model_type')):
            warnings.warn("Refit is change to `True` for nested validation!")
            setattr(searcher, 'refit', True)

        outer_cv, _ = get_cv(params['outer_split']['cv_selector'])
        # nested CV, outer cv using cross_validate
        if options['error_score'] == 'raise':
            rval = cross_validate(
                searcher,
                X,
                y,
                scoring=options['scoring'],
                cv=outer_cv,
                n_jobs=N_JOBS,
                verbose=options['verbose'],
                return_estimator=(params['save'] == 'save_estimator'),
                error_score=options['error_score'],
                return_train_score=True)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    rval = cross_validate(
                        searcher,
                        X,
                        y,
                        scoring=options['scoring'],
                        cv=outer_cv,
                        n_jobs=N_JOBS,
                        verbose=options['verbose'],
                        return_estimator=(params['save'] == 'save_estimator'),
                        error_score=options['error_score'],
                        return_train_score=True)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        fitted_searchers = rval.pop('estimator', [])
        if fitted_searchers:
            import os
            pwd = os.getcwd()
            save_dir = os.path.join(pwd, 'cv_results_in_folds')
            try:
                os.mkdir(save_dir)
                for idx, obj in enumerate(fitted_searchers):
                    target_name = 'cv_results_' + '_' + 'split%d' % idx
                    target_path = os.path.join(pwd, save_dir, target_name)
                    cv_results_ = getattr(obj, 'cv_results_', None)
                    if not cv_results_:
                        print("%s is not available" % target_name)
                        continue
                    cv_results_ = pd.DataFrame(cv_results_)
                    cv_results_ = cv_results_[sorted(cv_results_.columns)]
                    cv_results_.to_csv(target_path,
                                       sep='\t',
                                       header=True,
                                       index=False)
            except Exception as e:
                print(e)
            finally:
                del os

        keys = list(rval.keys())
        for k in keys:
            if k.startswith('test'):
                rval['mean_' + k] = np.mean(rval[k])
                rval['std_' + k] = np.std(rval[k])
            if k.endswith('time'):
                rval.pop(k)
        rval = pd.DataFrame(rval)
        rval = rval[sorted(rval.columns)]
        rval.to_csv(path_or_buf=outfile_result,
                    sep='\t',
                    header=True,
                    index=False)

        return 0

        # deprecate train test split mode
        """searcher = _do_train_test_split_val(
            searcher, X, y, params,
            primary_scoring=primary_scoring,
            error_score=options['error_score'],
            groups=groups,
            outfile=outfile_result)"""

    # no outer split
    else:
        searcher.set_params(n_jobs=N_JOBS)
        if options['error_score'] == 'raise':
            searcher.fit(X, y, groups=groups)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    searcher.fit(X, y, groups=groups)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        cv_results = pd.DataFrame(searcher.cv_results_)
        cv_results = cv_results[sorted(cv_results.columns)]
        cv_results.to_csv(path_or_buf=outfile_result,
                          sep='\t',
                          header=True,
                          index=False)

    memory.clear(warn=False)

    # output best estimator, and weights if applicable
    if outfile_object:
        best_estimator_ = getattr(searcher, 'best_estimator_', None)
        if not best_estimator_:
            warnings.warn("GridSearchCV object has no attribute "
                          "'best_estimator_', because either it's "
                          "nested gridsearch or `refit` is False!")
            return

        # clean params
        best_estimator_ = clean_params(best_estimator_)

        main_est = get_main_estimator(best_estimator_)

        if hasattr(main_est, 'model_') \
                and hasattr(main_est, 'save_weights'):
            if outfile_weights:
                main_est.save_weights(outfile_weights)
            del main_est.model_
            del main_est.fit_params
            del main_est.model_class_
            del main_est.validation_data
            if getattr(main_est, 'data_generator_', None):
                del main_est.data_generator_

        with open(outfile_object, 'wb') as output_handler:
            print("Best estimator is saved: %s " % repr(best_estimator_))
            pickle.dump(best_estimator_, output_handler,
                        pickle.HIGHEST_PROTOCOL)
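
The per-fold cv_results_ export above hinges on return_estimator: when it is true, cross_validate hands back the fitted searcher from every outer split. A minimal sketch of that mechanism in isolation (toy data; the file output is omitted):

import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV, cross_validate
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
searcher = GridSearchCV(DecisionTreeClassifier(random_state=0),
                        {'max_depth': [2, 4]}, cv=3)

rval = cross_validate(searcher, X, y, cv=3, return_estimator=True)

# One fitted GridSearchCV per outer fold, each carrying its own cv_results_.
for idx, fitted in enumerate(rval['estimator']):
    fold_results = pd.DataFrame(fitted.cv_results_)
    print('split%d: %d candidates, best params %s'
          % (idx, len(fold_results), fitted.best_params_))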
Code example #7
def main(inputs, infile_estimator, infile1, infile2,
         outfile_result, outfile_object=None,
         outfile_weights=None, groups=None,
         ref_seq=None, intervals=None, targets=None,
         fasta_path=None):
    """
    Parameters
    ----------
    inputs : str
        File path to galaxy tool parameter

    infile_estimator : str
        File path to estimator

    infile1 : str
        File path to dataset containing features

    infile2 : str
        File path to dataset containing target values

    outfile_result : str
        File path to save the results, either cv_results or test result

    outfile_object : str, optional
        File path to save searchCV object

    outfile_weights : str, optional
        File path to save model weights

    groups : str
        File path to dataset containing groups labels

    ref_seq : str
        File path to dataset containing genome sequence file

    intervals : str
        File path to dataset containing interval file

    targets : str
        File path to dataset compressed target bed file

    fasta_path : str
        File path to dataset containing fasta file
    """
    warnings.simplefilter('ignore')

    with open(inputs, 'r') as param_handler:
        params = json.load(param_handler)

    params_builder = params['search_schemes']['search_params_builder']

    with open(infile_estimator, 'rb') as estimator_handler:
        estimator = load_model(estimator_handler)
    estimator_params = estimator.get_params()

    # store read dataframe object
    loaded_df = {}

    input_type = params['input_options']['selected_input']
    # tabular input
    if input_type == 'tabular':
        header = 'infer' if params['input_options']['header1'] else None
        column_option = (params['input_options']['column_selector_options_1']
                         ['selected_column_selector_option'])
        if column_option in ['by_index_number', 'all_but_by_index_number',
                             'by_header_name', 'all_but_by_header_name']:
            c = params['input_options']['column_selector_options_1']['col1']
        else:
            c = None

        df_key = infile1 + repr(header)
        df = pd.read_csv(infile1, sep='\t', header=header,
                         parse_dates=True)
        loaded_df[df_key] = df

        X = read_columns(df, c=c, c_option=column_option).astype(float)
    # sparse input
    elif input_type == 'sparse':
        X = mmread(open(infile1, 'r'))

    # fasta_file input
    elif input_type == 'seq_fasta':
        pyfaidx = get_module('pyfaidx')
        sequences = pyfaidx.Fasta(fasta_path)
        n_seqs = len(sequences.keys())
        X = np.arange(n_seqs)[:, np.newaxis]
        for param in estimator_params.keys():
            if param.endswith('fasta_path'):
                estimator.set_params(
                    **{param: fasta_path})
                break
        else:
            raise ValueError(
                "The selected estimator doesn't support "
                "fasta file input! Please consider using "
                "KerasGBatchClassifier with "
                "FastaDNABatchGenerator/FastaProteinBatchGenerator "
                "or having GenomeOneHotEncoder/ProteinOneHotEncoder "
                "in pipeline!")

    elif input_type == 'refseq_and_interval':
        path_params = {
            'data_batch_generator__ref_genome_path': ref_seq,
            'data_batch_generator__intervals_path': intervals,
            'data_batch_generator__target_path': targets
        }
        estimator.set_params(**path_params)
        n_intervals = sum(1 for line in open(intervals))
        X = np.arange(n_intervals)[:, np.newaxis]

    # Get target y
    header = 'infer' if params['input_options']['header2'] else None
    column_option = (params['input_options']['column_selector_options_2']
                     ['selected_column_selector_option2'])
    if column_option in ['by_index_number', 'all_but_by_index_number',
                         'by_header_name', 'all_but_by_header_name']:
        c = params['input_options']['column_selector_options_2']['col2']
    else:
        c = None

    df_key = infile2 + repr(header)
    if df_key in loaded_df:
        infile2 = loaded_df[df_key]
    else:
        infile2 = pd.read_csv(infile2, sep='\t',
                              header=header, parse_dates=True)
        loaded_df[df_key] = infile2

    y = read_columns(
            infile2,
            c=c,
            c_option=column_option,
            sep='\t',
            header=header,
            parse_dates=True)
    if len(y.shape) == 2 and y.shape[1] == 1:
        y = y.ravel()
    if input_type == 'refseq_and_interval':
        estimator.set_params(
            data_batch_generator__features=y.ravel().tolist())
        y = None
    # end y

    optimizer = params['search_schemes']['selected_search_scheme']
    optimizer = getattr(model_selection, optimizer)

    # handle gridsearchcv options
    options = params['search_schemes']['options']

    if groups:
        header = 'infer' if (options['cv_selector']['groups_selector']
                                    ['header_g']) else None
        column_option = (options['cv_selector']['groups_selector']
                                ['column_selector_options_g']
                                ['selected_column_selector_option_g'])
        if column_option in ['by_index_number', 'all_but_by_index_number',
                             'by_header_name', 'all_but_by_header_name']:
            c = (options['cv_selector']['groups_selector']
                        ['column_selector_options_g']['col_g'])
        else:
            c = None

        df_key = groups + repr(header)
        if df_key in loaded_df:
            groups = loaded_df[df_key]

        groups = read_columns(
                groups,
                c=c,
                c_option=column_option,
                sep='\t',
                header=header,
                parse_dates=True)
        groups = groups.ravel()
        options['cv_selector']['groups_selector'] = groups

    splitter, groups = get_cv(options.pop('cv_selector'))
    options['cv'] = splitter
    options['n_jobs'] = N_JOBS
    primary_scoring = options['scoring']['primary_scoring']
    options['scoring'] = get_scoring(options['scoring'])
    if options['error_score']:
        options['error_score'] = 'raise'
    else:
        options['error_score'] = np.NaN
    if options['refit'] and isinstance(options['scoring'], dict):
        options['refit'] = primary_scoring
    if 'pre_dispatch' in options and options['pre_dispatch'] == '':
        options['pre_dispatch'] = None

    # del loaded_df
    del loaded_df

    # handle memory
    memory = joblib.Memory(location=CACHE_DIR, verbose=0)
    # caching iraps_core fits can increase search speed significantly
    if estimator.__class__.__name__ == 'IRAPSClassifier':
        estimator.set_params(memory=memory)
    else:
        # For iraps buried in pipeline
        for p, v in estimator_params.items():
            if p.endswith('memory'):
                # for case of `__irapsclassifier__memory`
                if len(p) > 8 and p[:-8].endswith('irapsclassifier'):
                    # caching iraps_core fits can increase search
                    # speed significantly
                    new_params = {p: memory}
                    estimator.set_params(**new_params)
                # for security reasons, we don't want memory being
                # modified unexpectedly
                elif v:
                    new_params = {p: None}
                    estimator.set_params(**new_params)
            # For now, 1 CPU is suggested for IRAPSClassifier
            elif p.endswith('n_jobs'):
                new_params = {p: 1}
                estimator.set_params(**new_params)
            # for security reasons, types of callbacks are limited
            elif p.endswith('callbacks'):
                for cb in v:
                    cb_type = cb['callback_selection']['callback_type']
                    if cb_type not in ALLOWED_CALLBACKS:
                        raise ValueError(
                            "Prohibited callback type: %s!" % cb_type)

    param_grid = _eval_search_params(params_builder)
    searcher = optimizer(estimator, param_grid, **options)

    # do nested split
    split_mode = params['outer_split'].pop('split_mode')
    # nested CV, outer cv using cross_validate
    if split_mode == 'nested_cv':
        outer_cv, _ = get_cv(params['outer_split']['cv_selector'])

        if options['error_score'] == 'raise':
            rval = cross_validate(
                searcher, X, y, scoring=options['scoring'],
                cv=outer_cv, n_jobs=N_JOBS, verbose=0,
                error_score=options['error_score'])
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    rval = cross_validate(
                        searcher, X, y,
                        scoring=options['scoring'],
                        cv=outer_cv, n_jobs=N_JOBS,
                        verbose=0,
                        error_score=options['error_score'])
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        keys = list(rval.keys())
        for k in keys:
            if k.startswith('test'):
                rval['mean_' + k] = np.mean(rval[k])
                rval['std_' + k] = np.std(rval[k])
            if k.endswith('time'):
                rval.pop(k)
        rval = pd.DataFrame(rval)
        rval = rval[sorted(rval.columns)]
        rval.to_csv(path_or_buf=outfile_result, sep='\t',
                    header=True, index=False)
    else:
        if split_mode == 'train_test_split':
            train_test_split = try_get_attr(
                'galaxy_ml.model_validations', 'train_test_split')
            # make sure refit is chosen
            # this could be True for sklearn models, but not the case for
            # deep learning models
            if not options['refit'] and \
                    not all(hasattr(estimator, attr)
                            for attr in ('config', 'model_type')):
                warnings.warn("Refit is change to `True` for nested "
                              "validation!")
                setattr(searcher, 'refit', True)
            split_options = params['outer_split']

            # splits
            if split_options['shuffle'] == 'stratified':
                split_options['labels'] = y
                X, X_test, y, y_test = train_test_split(X, y, **split_options)
            elif split_options['shuffle'] == 'group':
                if groups is None:
                    raise ValueError("No group based CV option was "
                                     "choosen for group shuffle!")
                split_options['labels'] = groups
                if y is None:
                    X, X_test, groups, _ =\
                        train_test_split(X, groups, **split_options)
                else:
                    X, X_test, y, y_test, groups, _ =\
                        train_test_split(X, y, groups, **split_options)
            else:
                if split_options['shuffle'] == 'None':
                    split_options['shuffle'] = None
                X, X_test, y, y_test =\
                    train_test_split(X, y, **split_options)
        # end train_test_split

        # shared by both train_test_split and non-split
        if options['error_score'] == 'raise':
            searcher.fit(X, y, groups=groups)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    searcher.fit(X, y, groups=groups)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        # no outer split
        if split_mode == 'no':
            # save results
            cv_results = pd.DataFrame(searcher.cv_results_)
            cv_results = cv_results[sorted(cv_results.columns)]
            cv_results.to_csv(path_or_buf=outfile_result, sep='\t',
                              header=True, index=False)

        # train_test_split, output test result using best_estimator_
        # or rebuild the trained estimator using weights if applicable.
        else:
            scorer_ = searcher.scorer_
            if isinstance(scorer_, collections.Mapping):
                is_multimetric = True
            else:
                is_multimetric = False

            best_estimator_ = getattr(searcher, 'best_estimator_', None)
            if not best_estimator_:
                raise ValueError("GridSearchCV object has no "
                                 "`best_estimator_` when `refit`=False!")

            if best_estimator_.__class__.__name__ == 'KerasGBatchClassifier' \
                    and hasattr(estimator.data_batch_generator, 'target_path'):
                test_score = best_estimator_.evaluate(
                    X_test, scorer=scorer_, is_multimetric=is_multimetric)
            else:
                test_score = _score(best_estimator_, X_test,
                                    y_test, scorer_,
                                    is_multimetric=is_multimetric)

            if not is_multimetric:
                test_score = {primary_scoring: test_score}
            for key, value in test_score.items():
                test_score[key] = [value]
            result_df = pd.DataFrame(test_score)
            result_df.to_csv(path_or_buf=outfile_result, sep='\t',
                             header=True, index=False)

    memory.clear(warn=False)

    if outfile_object:
        best_estimator_ = getattr(searcher, 'best_estimator_', None)
        if not best_estimator_:
            warnings.warn("GridSearchCV object has no attribute "
                          "'best_estimator_', because either it's "
                          "nested gridsearch or `refit` is False!")
            return

        main_est = best_estimator_
        if isinstance(best_estimator_, pipeline.Pipeline):
            main_est = best_estimator_.steps[-1][-1]

        if hasattr(main_est, 'model_') \
                and hasattr(main_est, 'save_weights'):
            if outfile_weights:
                main_est.save_weights(outfile_weights)
            del main_est.model_
            del main_est.fit_params
            del main_est.model_class_
            del main_est.validation_data
            if getattr(main_est, 'data_generator_', None):
                del main_est.data_generator_
                del main_est.data_batch_generator

        with open(outfile_object, 'wb') as output_handler:
            pickle.dump(best_estimator_, output_handler,
                        pickle.HIGHEST_PROTOCOL)
Code example #8

# Cross-validation analysis of the model
vectoriser = HashingVectorizer(norm=None, stop_words='english', n_features=10000, decode_error='ignore')
X = vectoriser.fit_transform(data.data)
Y = data.target
mean_error=[]; std_error=[]
c_range = [0.001, 0.01, 0.1, 0.5, 0.75, 1.0, 5.0]
precision_score = []
precision_var = []

for c in c_range:
    svm_model = SVC(C=c, kernel='rbf', gamma=1)
    scores = cross_validate(svm_model, X, Y, cv=4, scoring=('balanced_accuracy', 'precision_micro', 'f1_micro'))
    precision_score.append(scores['test_precision_micro'].mean())
    precision_var.append(scores['test_precision_micro'].var())
    print('C = %0.3f' % c)
    print('Accuracy: [%s]' % ', '.join(map(str, scores['test_balanced_accuracy'])))
    print('Precision: [%s]' % ', '.join(map(str, scores['test_precision_micro'])))
    print('F1: [%s]' % ', '.join(map(str, scores['test_f1_micro'])))
    print('')

fig = plt.figure()
plt.plot(c_range, precision_score)
plt.errorbar(c_range, precision_score, yerr=precision_var)
plt.title('Precision Vs C')
plt.xlabel('C')
plt.xlim((0,2))
plt.ylabel('Precision')
Code example #9
def main(
    inputs,
    infile_estimator,
    infile1,
    infile2,
    outfile_result,
    outfile_object=None,
    outfile_weights=None,
    groups=None,
    ref_seq=None,
    intervals=None,
    targets=None,
    fasta_path=None,
):
    """
    Parameters
    ----------
    inputs : str
        File path to galaxy tool parameter

    infile_estimator : str
        File path to estimator

    infile1 : str
        File path to dataset containing features

    infile2 : str
        File path to dataset containing target values

    outfile_result : str
        File path to save the results, either cv_results or test result

    outfile_object : str, optional
        File path to save searchCV object

    outfile_weights : str, optional
        File path to save model weights

    groups : str
        File path to dataset containing groups labels

    ref_seq : str
        File path to dataset containing genome sequence file

    intervals : str
        File path to dataset containing interval file

    targets : str
        File path to dataset compressed target bed file

    fasta_path : str
        File path to dataset containing fasta file
    """
    warnings.simplefilter("ignore")

    # store read dataframe object
    loaded_df = {}

    with open(inputs, "r") as param_handler:
        params = json.load(param_handler)

    # Override the refit parameter
    params["search_schemes"]["options"]["refit"] = (
        True if params["save"] != "nope" else False
    )

    with open(infile_estimator, "rb") as estimator_handler:
        estimator = load_model(estimator_handler)

    optimizer = params["search_schemes"]["selected_search_scheme"]
    optimizer = getattr(model_selection, optimizer)

    # handle gridsearchcv options
    options = params["search_schemes"]["options"]

    if groups:
        header = (
            "infer" if (options["cv_selector"]["groups_selector"]["header_g"]) else None
        )
        column_option = options["cv_selector"]["groups_selector"][
            "column_selector_options_g"
        ]["selected_column_selector_option_g"]
        if column_option in [
            "by_index_number",
            "all_but_by_index_number",
            "by_header_name",
            "all_but_by_header_name",
        ]:
            c = options["cv_selector"]["groups_selector"]["column_selector_options_g"][
                "col_g"
            ]
        else:
            c = None

        df_key = groups + repr(header)

        groups = pd.read_csv(groups, sep="\t", header=header, parse_dates=True)
        loaded_df[df_key] = groups

        groups = read_columns(
            groups,
            c=c,
            c_option=column_option,
            sep="\t",
            header=header,
            parse_dates=True,
        )
        groups = groups.ravel()
        options["cv_selector"]["groups_selector"] = groups

    splitter, groups = get_cv(options.pop("cv_selector"))
    options["cv"] = splitter
    primary_scoring = options["scoring"]["primary_scoring"]
    # get_scoring() expects secondary_scoring to be a comma separated string (not a list)
    # Check if secondary_scoring is specified
    secondary_scoring = options["scoring"].get("secondary_scoring", None)
    if secondary_scoring is not None:
        # If secondary_scoring is specified, convert the list into a comma-separated string
        options["scoring"]["secondary_scoring"] = ",".join(
            options["scoring"]["secondary_scoring"]
        )
    options["scoring"] = get_scoring(options["scoring"])
    if options["error_score"]:
        options["error_score"] = "raise"
    else:
        options["error_score"] = np.nan
    if options["refit"] and isinstance(options["scoring"], dict):
        options["refit"] = primary_scoring
    if "pre_dispatch" in options and options["pre_dispatch"] == "":
        options["pre_dispatch"] = None

    params_builder = params["search_schemes"]["search_params_builder"]
    param_grid = _eval_search_params(params_builder)

    estimator = clean_params(estimator)

    # save the SearchCV object without fit
    if params["save"] == "save_no_fit":
        searcher = optimizer(estimator, param_grid, **options)
        print(searcher)
        with open(outfile_object, "wb") as output_handler:
            pickle.dump(searcher, output_handler, pickle.HIGHEST_PROTOCOL)
        return 0

    # read inputs and load new attributes, like paths
    estimator, X, y = _handle_X_y(
        estimator,
        params,
        infile1,
        infile2,
        loaded_df=loaded_df,
        ref_seq=ref_seq,
        intervals=intervals,
        targets=targets,
        fasta_path=fasta_path,
    )

    # caching iraps_core fits can increase search speed significantly
    memory = joblib.Memory(location=CACHE_DIR, verbose=0)
    main_est = get_main_estimator(estimator)
    if main_est.__class__.__name__ == "IRAPSClassifier":
        main_est.set_params(memory=memory)

    searcher = optimizer(estimator, param_grid, **options)

    split_mode = params["outer_split"].pop("split_mode")

    if split_mode == "nested_cv":
        # make sure refit is chosen
        # this could be True for sklearn models, but not the case for
        # deep learning models
        if not options["refit"] and not all(
            hasattr(estimator, attr) for attr in ("config", "model_type")
        ):
            warnings.warn("Refit is change to `True` for nested validation!")
            setattr(searcher, "refit", True)

        outer_cv, _ = get_cv(params["outer_split"]["cv_selector"])
        # nested CV, outer cv using cross_validate
        if options["error_score"] == "raise":
            rval = cross_validate(
                searcher,
                X,
                y,
                scoring=options["scoring"],
                cv=outer_cv,
                n_jobs=N_JOBS,
                verbose=options["verbose"],
                return_estimator=(params["save"] == "save_estimator"),
                error_score=options["error_score"],
                return_train_score=True,
            )
        else:
            warnings.simplefilter("always", FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    rval = cross_validate(
                        searcher,
                        X,
                        y,
                        scoring=options["scoring"],
                        cv=outer_cv,
                        n_jobs=N_JOBS,
                        verbose=options["verbose"],
                        return_estimator=(params["save"] == "save_estimator"),
                        error_score=options["error_score"],
                        return_train_score=True,
                    )
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        fitted_searchers = rval.pop("estimator", [])
        if fitted_searchers:
            import os

            pwd = os.getcwd()
            save_dir = os.path.join(pwd, "cv_results_in_folds")
            try:
                os.mkdir(save_dir)
                for idx, obj in enumerate(fitted_searchers):
                    target_name = "cv_results_" + "_" + "split%d" % idx
                    target_path = os.path.join(pwd, save_dir, target_name)
                    cv_results_ = getattr(obj, "cv_results_", None)
                    if not cv_results_:
                        print("%s is not available" % target_name)
                        continue
                    cv_results_ = pd.DataFrame(cv_results_)
                    cv_results_ = cv_results_[sorted(cv_results_.columns)]
                    cv_results_.to_csv(target_path, sep="\t", header=True, index=False)
            except Exception as e:
                print(e)
            finally:
                del os

        keys = list(rval.keys())
        for k in keys:
            if k.startswith("test"):
                rval["mean_" + k] = np.mean(rval[k])
                rval["std_" + k] = np.std(rval[k])
            if k.endswith("time"):
                rval.pop(k)
        rval = pd.DataFrame(rval)
        rval = rval[sorted(rval.columns)]
        rval.to_csv(path_or_buf=outfile_result, sep="\t", header=True, index=False)
        # deprecate train test split mode
        """searcher = _do_train_test_split_val(
            searcher, X, y, params,
            primary_scoring=primary_scoring,
            error_score=options['error_score'],
            groups=groups,
            outfile=outfile_result)"""
        return 0

    # no outer split
    else:
        searcher.set_params(n_jobs=N_JOBS)
        if options["error_score"] == "raise":
            searcher.fit(X, y, groups=groups)
        else:
            warnings.simplefilter("always", FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    searcher.fit(X, y, groups=groups)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        cv_results = pd.DataFrame(searcher.cv_results_)
        cv_results = cv_results[sorted(cv_results.columns)]
        cv_results.to_csv(
            path_or_buf=outfile_result, sep="\t", header=True, index=False
        )

    memory.clear(warn=False)

    # output best estimator, and weights if applicable
    if outfile_object:
        best_estimator_ = getattr(searcher, "best_estimator_", None)
        if not best_estimator_:
            warnings.warn(
                "GridSearchCV object has no attribute "
                "'best_estimator_', because either it's "
                "nested gridsearch or `refit` is False!"
            )
            return

        # clean params
        best_estimator_ = clean_params(best_estimator_)

        main_est = get_main_estimator(best_estimator_)

        if hasattr(main_est, "model_") and hasattr(main_est, "save_weights"):
            if outfile_weights:
                main_est.save_weights(outfile_weights)
            del main_est.model_
            del main_est.fit_params
            del main_est.model_class_
            del main_est.validation_data
            if getattr(main_est, "data_generator_", None):
                del main_est.data_generator_

        with open(outfile_object, "wb") as output_handler:
            print("Best estimator is saved: %s " % repr(best_estimator_))
            pickle.dump(best_estimator_, output_handler, pickle.HIGHEST_PROTOCOL)
Code example #10
def main(inputs,
         infile_estimator,
         infile1,
         infile2,
         outfile_result,
         outfile_object=None,
         groups=None,
         ref_seq=None,
         intervals=None,
         targets=None,
         fasta_path=None):
    """
    Parameters
    ----------
    inputs : str
        File path to galaxy tool parameter.

    infile_estimator : str
        File path to estimator.

    infile1 : str
        File path to dataset containing features

    infile2 : str
        File path to dataset containing target values

    outfile_result : str
        File path to save the results, either cv_results or test result

    outfile_object : str, optional
        File path to save searchCV object

    groups : str
        File path to dataset containing groups labels

    ref_seq : str
        File path to dataset containing genome sequence file

    intervals : str
        File path to dataset containing interval file

    targets : str
        File path to dataset compressed target bed file

    fasta_path : str
        File path to dataset containing fasta file
    """
    warnings.simplefilter('ignore')

    # store read dataframe object
    loaded_df = {}

    with open(inputs, 'r') as param_handler:
        params = json.load(param_handler)

    # Override the refit parameter
    params['search_schemes']['options']['refit'] = True \
        if (params['save'] != 'nope' or
            params['outer_split']['split_mode'] == 'nested_cv') else False

    estimator = load_model_from_h5(infile_estimator)

    estimator = clean_params(estimator)

    if estimator.__class__.__name__ == 'KerasGBatchClassifier':
        _fit_and_score = try_get_attr('galaxy_ml.model_validations',
                                      '_fit_and_score')

        setattr(_search, '_fit_and_score', _fit_and_score)
        setattr(_validation, '_fit_and_score', _fit_and_score)

    optimizer = params['search_schemes']['selected_search_scheme']
    if optimizer == 'skopt.BayesSearchCV':
        optimizer = BayesSearchCV
    else:
        optimizer = getattr(model_selection, optimizer)

    # handle gridsearchcv options
    options = params['search_schemes']['options']

    if groups:
        header = 'infer' if (
            options['cv_selector']['groups_selector']['header_g']) else None
        column_option = (
            options['cv_selector']['groups_selector']
            ['column_selector_options_g']['selected_column_selector_option_g'])
        if column_option in [
                'by_index_number', 'all_but_by_index_number', 'by_header_name',
                'all_but_by_header_name'
        ]:
            c = (options['cv_selector']['groups_selector']
                 ['column_selector_options_g']['col_g'])
        else:
            c = None

        df_key = groups + repr(header)

        groups = pd.read_csv(groups, sep='\t', header=header, parse_dates=True)
        loaded_df[df_key] = groups

        groups = read_columns(groups,
                              c=c,
                              c_option=column_option,
                              sep='\t',
                              header=header,
                              parse_dates=True)
        groups = groups.ravel()
        options['cv_selector']['groups_selector'] = groups

    cv_selector = options.pop('cv_selector')
    if Version(galaxy_ml_version) < Version('0.8.3'):
        cv_selector.pop('n_stratification_bins', None)
    splitter, groups = get_cv(cv_selector)
    options['cv'] = splitter
    primary_scoring = options['scoring']['primary_scoring']
    options['scoring'] = get_scoring(options['scoring'])
    # TODO make BayesSearchCV support multiple scoring
    if optimizer is BayesSearchCV and \
            isinstance(options['scoring'], dict):
        options['scoring'] = options['scoring'][primary_scoring]
        warnings.warn("BayesSearchCV doesn't support multiple "
                      "scorings! Primary scoring is used.")
    if options['error_score']:
        options['error_score'] = 'raise'
    else:
        options['error_score'] = np.NaN
    if options['refit'] and isinstance(options['scoring'], dict):
        options['refit'] = primary_scoring
    if 'pre_dispatch' in options and options['pre_dispatch'] == '':
        options['pre_dispatch'] = None

    params_builder = params['search_schemes']['search_params_builder']
    param_grid = _eval_search_params(params_builder)

    # save the SearchCV object without fit
    if params['save'] == 'save_no_fit':
        searcher = optimizer(estimator, param_grid, **options)
        dump_model_to_h5(searcher, outfile_object)
        return 0

    # read inputs and load new attributes, like paths
    estimator, X, y = _handle_X_y(estimator,
                                  params,
                                  infile1,
                                  infile2,
                                  loaded_df=loaded_df,
                                  ref_seq=ref_seq,
                                  intervals=intervals,
                                  targets=targets,
                                  fasta_path=fasta_path)

    # caching iraps_core fits can increase search speed significantly
    memory = joblib.Memory(location=CACHE_DIR, verbose=0)
    estimator = _set_memory(estimator, memory)

    searcher = optimizer(estimator, param_grid, **options)

    split_mode = params['outer_split'].pop('split_mode')

    # Nested CV
    if split_mode == 'nested_cv':
        cv_selector = params['outer_split']['cv_selector']
        if Version(galaxy_ml_version) < Version('0.8.3'):
            cv_selector.pop('n_stratification_bins', None)
        outer_cv, _ = get_cv(cv_selector)
        # nested CV, outer cv using cross_validate
        if options['error_score'] == 'raise':
            rval = cross_validate(
                searcher,
                X,
                y,
                groups=groups,
                scoring=options['scoring'],
                cv=outer_cv,
                n_jobs=N_JOBS,
                verbose=options['verbose'],
                fit_params={'groups': groups},
                return_estimator=(params['save'] == 'save_estimator'),
                error_score=options['error_score'],
                return_train_score=True)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    rval = cross_validate(
                        searcher,
                        X,
                        y,
                        groups=groups,
                        scoring=options['scoring'],
                        cv=outer_cv,
                        n_jobs=N_JOBS,
                        verbose=options['verbose'],
                        fit_params={'groups': groups},
                        return_estimator=(params['save'] == 'save_estimator'),
                        error_score=options['error_score'],
                        return_train_score=True)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        fitted_searchers = rval.pop('estimator', [])
        if fitted_searchers:
            import os
            pwd = os.getcwd()
            save_dir = os.path.join(pwd, 'cv_results_in_folds')
            try:
                os.mkdir(save_dir)
                for idx, obj in enumerate(fitted_searchers):
                    target_name = 'cv_results_' + '_' + 'split%d' % idx
                    target_path = os.path.join(pwd, save_dir, target_name)
                    cv_results_ = getattr(obj, 'cv_results_', None)
                    if not cv_results_:
                        print("%s is not available" % target_name)
                        continue
                    cv_results_ = pd.DataFrame(cv_results_)
                    cv_results_ = cv_results_[sorted(cv_results_.columns)]
                    cv_results_.to_csv(target_path,
                                       sep='\t',
                                       header=True,
                                       index=False)
            except Exception as e:
                print(e)
            finally:
                del os

        keys = list(rval.keys())
        for k in keys:
            if k.startswith('test'):
                rval['mean_' + k] = np.mean(rval[k])
                rval['std_' + k] = np.std(rval[k])
            if k.endswith('time'):
                rval.pop(k)
        rval = pd.DataFrame(rval)
        rval = rval[sorted(rval.columns)]
        rval.to_csv(path_or_buf=outfile_result,
                    sep='\t',
                    header=True,
                    index=False)

        return 0

        # deprecate train test split mode
        """searcher = _do_train_test_split_val(
            searcher, X, y, params,
            primary_scoring=primary_scoring,
            error_score=options['error_score'],
            groups=groups,
            outfile=outfile_result)"""

    # no outer split
    else:
        searcher.set_params(n_jobs=N_JOBS)
        if options['error_score'] == 'raise':
            searcher.fit(X, y, groups=groups)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    searcher.fit(X, y, groups=groups)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        cv_results = pd.DataFrame(searcher.cv_results_)
        cv_results = cv_results[sorted(cv_results.columns)]
        cv_results.to_csv(path_or_buf=outfile_result,
                          sep='\t',
                          header=True,
                          index=False)

    memory.clear(warn=False)

    # output best estimator, and weights if applicable
    if outfile_object:
        best_estimator_ = getattr(searcher, 'best_estimator_', None)
        if not best_estimator_:
            warnings.warn("GridSearchCV object has no attribute "
                          "'best_estimator_', because either it's "
                          "nested gridsearch or `refit` is False!")
            return

        print("Saving best estimator: %s " % repr(best_estimator_))
        dump_model_to_h5(best_estimator_, outfile_object)
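
Unlike the earlier variants, this version forwards the group labels twice: groups= drives the outer splitter, while fit_params={'groups': groups} passes the matching subset of labels on to the inner searcher's fit. A minimal sketch of that pattern, assuming a scikit-learn version that still accepts fit_params (data and estimator are illustrative):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, GroupKFold, cross_validate

X, y = make_classification(n_samples=90, random_state=0)
groups = np.repeat(np.arange(9), 10)  # 9 groups of 10 samples each

searcher = GridSearchCV(LogisticRegression(max_iter=1000),
                        {'C': [0.1, 1.0]}, cv=GroupKFold(n_splits=3))

# groups= controls the outer GroupKFold; fit_params hands the per-fold subset
# of the same labels to searcher.fit so the inner GroupKFold can use them too.
rval = cross_validate(searcher, X, y, groups=groups,
                      cv=GroupKFold(n_splits=3),
                      fit_params={'groups': groups})
print(rval['test_score'])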
Code example #11
def main(inputs, infile_estimator, infile1, infile2,
         outfile_result, outfile_object=None, groups=None):
    """
    Parameters
    ----------
    inputs : str
        File path to galaxy tool parameter

    infile_estimator : str
        File path to estimator

    infile1 : str
        File path to dataset containing features

    infile2 : str
        File path to dataset containing target values

    outfile_result : str
        File path to save the results, either cv_results or test result

    outfile_object : str, optional
        File path to save searchCV object

    groups : str
        File path to dataset containing groups labels
    """

    warnings.simplefilter('ignore')

    with open(inputs, 'r') as param_handler:
        params = json.load(param_handler)
    if groups:
        (params['search_schemes']['options']['cv_selector']
         ['groups_selector']['infile_g']) = groups

    params_builder = params['search_schemes']['search_params_builder']

    input_type = params['input_options']['selected_input']
    if input_type == 'tabular':
        header = 'infer' if params['input_options']['header1'] else None
        column_option = (params['input_options']['column_selector_options_1']
                         ['selected_column_selector_option'])
        if column_option in ['by_index_number', 'all_but_by_index_number',
                             'by_header_name', 'all_but_by_header_name']:
            c = params['input_options']['column_selector_options_1']['col1']
        else:
            c = None
        X = read_columns(
                infile1,
                c=c,
                c_option=column_option,
                sep='\t',
                header=header,
                parse_dates=True).astype(float)
    else:
        X = mmread(open(infile1, 'r'))

    header = 'infer' if params['input_options']['header2'] else None
    column_option = (params['input_options']['column_selector_options_2']
                     ['selected_column_selector_option2'])
    if column_option in ['by_index_number', 'all_but_by_index_number',
                         'by_header_name', 'all_but_by_header_name']:
        c = params['input_options']['column_selector_options_2']['col2']
    else:
        c = None
    y = read_columns(
            infile2,
            c=c,
            c_option=column_option,
            sep='\t',
            header=header,
            parse_dates=True)
    if len(y.shape) == 2 and y.shape[1] == 1:
        y = y.ravel()

    optimizer = params['search_schemes']['selected_search_scheme']
    optimizer = getattr(model_selection, optimizer)

    # handle gridsearchcv options
    options = params['search_schemes']['options']

    splitter, groups = get_cv(options.pop('cv_selector'))
    options['cv'] = splitter
    options['n_jobs'] = N_JOBS
    primary_scoring = options['scoring']['primary_scoring']
    options['scoring'] = get_scoring(options['scoring'])
    if options['error_score']:
        options['error_score'] = 'raise'
    else:
        options['error_score'] = np.NaN
    if options['refit'] and isinstance(options['scoring'], dict):
        options['refit'] = primary_scoring
    if 'pre_dispatch' in options and options['pre_dispatch'] == '':
        options['pre_dispatch'] = None

    with open(infile_estimator, 'rb') as estimator_handler:
        estimator = load_model(estimator_handler)

    # handle memory
    memory = joblib.Memory(location=CACHE_DIR, verbose=0)
    # cache iraps_core fits could increase search speed significantly
    if estimator.__class__.__name__ == 'IRAPSClassifier':
        estimator.set_params(memory=memory)
    else:
        # For iraps buried in pipeline
        for p, v in estimator.get_params().items():
            if p.endswith('memory'):
                # for case of `__irapsclassifier__memory`
                if len(p) > 8 and p[:-8].endswith('irapsclassifier'):
                    # cache iraps_core fits could increase search
                    # speed significantly
                    new_params = {p: memory}
                    estimator.set_params(**new_params)
                # security reason, we don't want memory being
                # modified unexpectedly
                elif v:
                    new_params = {p: None}
                    estimator.set_params(**new_params)
            # For now, 1 CPU is suggested for irapsclassifier
            elif p.endswith('n_jobs'):
                new_params = {p: 1}
                estimator.set_params(**new_params)
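    # Illustration (hypothetical pipeline): for an estimator such as
    # Pipeline([('irapsclassifier', IRAPSClassifier())]), the loop above sees
    # the parameter 'irapsclassifier__memory' and points it at the shared
    # joblib cache; any other already-set '*memory' parameter is reset to
    # None, and every '*n_jobs' parameter is pinned to 1.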

    param_grid = _eval_search_params(params_builder)
    searcher = optimizer(estimator, param_grid, **options)

    # do nested split
    split_mode = params['outer_split'].pop('split_mode')
    # nested CV, outer cv using cross_validate
    if split_mode == 'nested_cv':
        outer_cv, _ = get_cv(params['outer_split']['cv_selector'])

        if options['error_score'] == 'raise':
            rval = cross_validate(
                searcher, X, y, scoring=options['scoring'],
                cv=outer_cv, n_jobs=N_JOBS, verbose=0,
                error_score=options['error_score'])
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    rval = cross_validate(
                        searcher, X, y,
                        scoring=options['scoring'],
                        cv=outer_cv, n_jobs=N_JOBS,
                        verbose=0,
                        error_score=options['error_score'])
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        keys = list(rval.keys())
        for k in keys:
            if k.startswith('test'):
                rval['mean_' + k] = np.mean(rval[k])
                rval['std_' + k] = np.std(rval[k])
            if k.endswith('time'):
                rval.pop(k)
        rval = pandas.DataFrame(rval)
        rval = rval[sorted(rval.columns)]
        rval.to_csv(path_or_buf=outfile_result, sep='\t',
                    header=True, index=False)
    else:
        if split_mode == 'train_test_split':
            train_test_split = try_get_attr(
                'model_validations', 'train_test_split')
            # make sure refit is chosen
            if not options['refit']:
                raise ValueError("Refit must be `True` for shuffle splitting!")
            split_options = params['outer_split']

            # splits
            if split_options['shuffle'] == 'stratified':
                split_options['labels'] = y
                X, X_test, y, y_test = train_test_split(X, y, **split_options)
            elif split_options['shuffle'] == 'group':
                if not groups:
                    raise ValueError("No group based CV option was "
                                     "choosen for group shuffle!")
                split_options['labels'] = groups
                X, X_test, y, y_test, groups, _ =\
                    train_test_split(X, y, **split_options)
            else:
                if split_options['shuffle'] == 'None':
                    split_options['shuffle'] = None
                X, X_test, y, y_test =\
                    train_test_split(X, y, **split_options)
        # end train_test_split

        # shared by both train_test_split and non-split
        if options['error_score'] == 'raise':
            searcher.fit(X, y, groups=groups)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    searcher.fit(X, y, groups=groups)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        # no outer split
        if split_mode == 'no':
            # save results
            cv_results = pandas.DataFrame(searcher.cv_results_)
            cv_results = cv_results[sorted(cv_results.columns)]
            cv_results.to_csv(path_or_buf=outfile_result, sep='\t',
                              header=True, index=False)

        # train_test_split, output test result using best_estimator_
        else:
            best_estimator_ = searcher.best_estimator_
            scorer_ = searcher.scorer_
            if isinstance(scorer_, collections.Mapping):
                is_multimetric = True
            else:
                is_multimetric = False

            test_score = _score(best_estimator_, X_test,
                                y_test, scorer_,
                                is_multimetric=is_multimetric)
            if not is_multimetric:
                test_score = {primary_scoring: test_score}
            for key, value in test_score.items():
                test_score[key] = [value]
            result_df = pandas.DataFrame(test_score)
            result_df.to_csv(path_or_buf=outfile_result, sep='\t',
                             header=True, index=False)

    memory.clear(warn=False)

    if outfile_object:
        with open(outfile_object, 'wb') as output_handler:
            pickle.dump(searcher, output_handler, pickle.HIGHEST_PROTOCOL)
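Below is a minimal sketch of how this `main` entry point might be driven from the command line. The argparse flag names are assumptions for illustration; the original snippet does not show the tool's actual CLI.

if __name__ == '__main__':
    import argparse

    # Hypothetical CLI wrapper; flag names are illustrative assumptions.
    aparser = argparse.ArgumentParser()
    aparser.add_argument("--inputs", dest="inputs", required=True)
    aparser.add_argument("--estimator", dest="infile_estimator", required=True)
    aparser.add_argument("--infile1", dest="infile1", required=True)
    aparser.add_argument("--infile2", dest="infile2", required=True)
    aparser.add_argument("--outfile_result", dest="outfile_result", required=True)
    aparser.add_argument("--outfile_object", dest="outfile_object", default=None)
    aparser.add_argument("--groups", dest="groups", default=None)
    args = aparser.parse_args()

    main(args.inputs, args.infile_estimator, args.infile1, args.infile2,
         args.outfile_result, outfile_object=args.outfile_object,
         groups=args.groups)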