Example #1
def main(inputs,
         infile_estimator,
         infile1,
         infile2,
         outfile_result,
         outfile_object=None,
         outfile_weights=None,
         groups=None,
         ref_seq=None,
         intervals=None,
         targets=None,
         fasta_path=None):
    """
    Parameters
    ----------
    inputs : str
        File path to Galaxy tool parameters

    infile_estimator : str
        File path to estimator

    infile1 : str
        File path to dataset containing features

    infile2 : str
        File path to dataset containing target values

    outfile_result : str
        File path to save the results, either cv_results or test result

    outfile_object : str, optional
        File path to save searchCV object

    outfile_weights : str, optional
        File path to save model weights

    groups : str
        File path to dataset containing group labels

    ref_seq : str
        File path to dataset containing genome sequence file

    intervals : str
        File path to dataset containing interval file

    targets : str
        File path to dataset containing compressed target bed file

    fasta_path : str
        File path to dataset containing fasta file
    """
    warnings.simplefilter('ignore')

    # cache already-read dataframe objects
    loaded_df = {}

    with open(inputs, 'r') as param_handler:
        params = json.load(param_handler)

    # Override the refit parameter
    params['search_schemes']['options']['refit'] = True \
        if params['save'] != 'nope' else False

    with open(infile_estimator, 'rb') as estimator_handler:
        estimator = load_model(estimator_handler)

    optimizer = params['search_schemes']['selected_search_scheme']
    optimizer = getattr(model_selection, optimizer)

    # handle gridsearchcv options
    options = params['search_schemes']['options']

    if groups:
        header = 'infer' if (
            options['cv_selector']['groups_selector']['header_g']) else None
        column_option = (
            options['cv_selector']['groups_selector']
            ['column_selector_options_g']['selected_column_selector_option_g'])
        if column_option in [
                'by_index_number', 'all_but_by_index_number', 'by_header_name',
                'all_but_by_header_name'
        ]:
            c = (options['cv_selector']['groups_selector']
                 ['column_selector_options_g']['col_g'])
        else:
            c = None

        df_key = groups + repr(header)

        groups = pd.read_csv(groups, sep='\t', header=header, parse_dates=True)
        loaded_df[df_key] = groups

        groups = read_columns(groups,
                              c=c,
                              c_option=column_option,
                              sep='\t',
                              header=header,
                              parse_dates=True)
        groups = groups.ravel()
        options['cv_selector']['groups_selector'] = groups

    splitter, groups = get_cv(options.pop('cv_selector'))
    options['cv'] = splitter
    primary_scoring = options['scoring']['primary_scoring']
    options['scoring'] = get_scoring(options['scoring'])
    if options['error_score']:
        options['error_score'] = 'raise'
    else:
        options['error_score'] = np.nan
    if options['refit'] and isinstance(options['scoring'], dict):
        options['refit'] = primary_scoring
    if 'pre_dispatch' in options and options['pre_dispatch'] == '':
        options['pre_dispatch'] = None

    params_builder = params['search_schemes']['search_params_builder']
    param_grid = _eval_search_params(params_builder)

    estimator = clean_params(estimator)

    # save the SearchCV object without fit
    if params['save'] == 'save_no_fit':
        searcher = optimizer(estimator, param_grid, **options)
        print(searcher)
        with open(outfile_object, 'wb') as output_handler:
            pickle.dump(searcher, output_handler, pickle.HIGHEST_PROTOCOL)
        return 0

    # read inputs and load new attributes, like paths
    estimator, X, y = _handle_X_y(estimator,
                                  params,
                                  infile1,
                                  infile2,
                                  loaded_df=loaded_df,
                                  ref_seq=ref_seq,
                                  intervals=intervals,
                                  targets=targets,
                                  fasta_path=fasta_path)

    # caching iraps_core fits could increase search speed significantly
    memory = joblib.Memory(location=CACHE_DIR, verbose=0)
    main_est = get_main_estimator(estimator)
    if main_est.__class__.__name__ == 'IRAPSClassifier':
        main_est.set_params(memory=memory)

    searcher = optimizer(estimator, param_grid, **options)

    split_mode = params['outer_split'].pop('split_mode')

    if split_mode == 'nested_cv':
        # make sure refit is chosen
        # this could be True for sklearn models, but not the case for
        # deep learning models
        if not options['refit'] and \
                not all(hasattr(estimator, attr)
                        for attr in ('config', 'model_type')):
            warnings.warn("Refit is change to `True` for nested validation!")
            setattr(searcher, 'refit', True)

        outer_cv, _ = get_cv(params['outer_split']['cv_selector'])
        # nested CV, outer cv using cross_validate
        if options['error_score'] == 'raise':
            rval = cross_validate(
                searcher,
                X,
                y,
                scoring=options['scoring'],
                cv=outer_cv,
                n_jobs=N_JOBS,
                verbose=options['verbose'],
                return_estimator=(params['save'] == 'save_estimator'),
                error_score=options['error_score'],
                return_train_score=True)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    rval = cross_validate(
                        searcher,
                        X,
                        y,
                        scoring=options['scoring'],
                        cv=outer_cv,
                        n_jobs=N_JOBS,
                        verbose=options['verbose'],
                        return_estimator=(params['save'] == 'save_estimator'),
                        error_score=options['error_score'],
                        return_train_score=True)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        fitted_searchers = rval.pop('estimator', [])
        if fitted_searchers:
            import os
            pwd = os.getcwd()
            save_dir = os.path.join(pwd, 'cv_results_in_folds')
            try:
                os.mkdir(save_dir)
                for idx, obj in enumerate(fitted_searchers):
                    target_name = 'cv_results__split%d' % idx
                    target_path = os.path.join(pwd, save_dir, target_name)
                    cv_results_ = getattr(obj, 'cv_results_', None)
                    if not cv_results_:
                        print("%s is not available" % target_name)
                        continue
                    cv_results_ = pd.DataFrame(cv_results_)
                    cv_results_ = cv_results_[sorted(cv_results_.columns)]
                    cv_results_.to_csv(target_path,
                                       sep='\t',
                                       header=True,
                                       index=False)
            except Exception as e:
                print(e)
            finally:
                del os

        keys = list(rval.keys())
        for k in keys:
            if k.startswith('test'):
                rval['mean_' + k] = np.mean(rval[k])
                rval['std_' + k] = np.std(rval[k])
            if k.endswith('time'):
                rval.pop(k)
        rval = pd.DataFrame(rval)
        rval = rval[sorted(rval.columns)]
        rval.to_csv(path_or_buf=outfile_result,
                    sep='\t',
                    header=True,
                    index=False)

        return 0

        # deprecate train test split mode
        """searcher = _do_train_test_split_val(
            searcher, X, y, params,
            primary_scoring=primary_scoring,
            error_score=options['error_score'],
            groups=groups,
            outfile=outfile_result)"""

    # no outer split
    else:
        searcher.set_params(n_jobs=N_JOBS)
        if options['error_score'] == 'raise':
            searcher.fit(X, y, groups=groups)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    searcher.fit(X, y, groups=groups)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        cv_results = pd.DataFrame(searcher.cv_results_)
        cv_results = cv_results[sorted(cv_results.columns)]
        cv_results.to_csv(path_or_buf=outfile_result,
                          sep='\t',
                          header=True,
                          index=False)

    memory.clear(warn=False)

    # output best estimator, and weights if applicable
    if outfile_object:
        best_estimator_ = getattr(searcher, 'best_estimator_', None)
        if not best_estimator_:
            warnings.warn("GridSearchCV object has no attribute "
                          "'best_estimator_', because either it's "
                          "nested gridsearch or `refit` is False!")
            return

        # clean params
        best_estimator_ = clean_params(best_estimator_)

        main_est = get_main_estimator(best_estimator_)

        if hasattr(main_est, 'model_') \
                and hasattr(main_est, 'save_weights'):
            if outfile_weights:
                main_est.save_weights(outfile_weights)
            del main_est.model_
            del main_est.fit_params
            del main_est.model_class_
            del main_est.validation_data
            if getattr(main_est, 'data_generator_', None):
                del main_est.data_generator_

        with open(outfile_object, 'wb') as output_handler:
            print("Best estimator is saved: %s " % repr(best_estimator_))
            pickle.dump(best_estimator_, output_handler,
                        pickle.HIGHEST_PROTOCOL)
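
A hypothetical command-line entry point for a main() like the one above; the flag names and wiring below are illustrative assumptions, not the tool's actual interface:

import argparse

if __name__ == '__main__':
    # hypothetical CLI wiring for the main() above
    aparser = argparse.ArgumentParser()
    aparser.add_argument('--inputs', dest='inputs', required=True)
    aparser.add_argument('--estimator', dest='infile_estimator', required=True)
    aparser.add_argument('--infile1', dest='infile1', required=True)
    aparser.add_argument('--infile2', dest='infile2', required=True)
    aparser.add_argument('--outfile_result', dest='outfile_result',
                         required=True)
    aparser.add_argument('--outfile_object', dest='outfile_object')
    aparser.add_argument('--outfile_weights', dest='outfile_weights')
    aparser.add_argument('--groups', dest='groups')
    args = aparser.parse_args()

    main(args.inputs, args.infile_estimator, args.infile1, args.infile2,
         args.outfile_result, outfile_object=args.outfile_object,
         outfile_weights=args.outfile_weights, groups=args.groups)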
Example #2
def main(inputs_path, output_obj, base_paths=None, meta_path=None):
    """
    Parameters
    ----------
    inputs_path : str
        File path for Galaxy parameters

    output_obj : str
        File path for ensemble estimator output

    base_paths : str
        File path, or multiple paths concatenated by commas.

    meta_path : str
        File path to the meta estimator
    """
    with open(inputs_path, 'r') as param_handler:
        params = json.load(param_handler)

    estimator_type = params['algo_selection']['estimator_type']
    # get base estimators
    base_estimators = []
    for idx, base_file in enumerate(base_paths.split(',')):
        if base_file and base_file != 'None':
            model = load_model_from_h5(base_file)
        else:
            estimator_json = (
                params['base_est_builder'][idx]['estimator_selector'])
            model = get_estimator(estimator_json)

        if estimator_type.startswith('sklearn'):
            named = model.__class__.__name__.lower()
            named = 'base_%d_%s' % (idx, named)
            base_estimators.append((named, model))
        else:
            base_estimators.append(model)

    # get meta estimator, if applicable
    if estimator_type.startswith('mlxtend'):
        if meta_path:
            meta_estimator = load_model_from_h5(meta_path)
        else:
            estimator_json = (params['algo_selection']['meta_estimator']
                              ['estimator_selector'])
            meta_estimator = get_estimator(estimator_json)

    options = params['algo_selection']['options']

    cv_selector = options.pop('cv_selector', None)
    if cv_selector:
        if Version(galaxy_ml_version) < Version('0.8.3'):
            cv_selector.pop('n_stratification_bins', None)
        splitter, groups = get_cv(cv_selector)
        options['cv'] = splitter
        # set n_jobs
        options['n_jobs'] = N_JOBS

    weights = options.pop('weights', None)
    if weights:
        weights = ast.literal_eval(weights)
        if weights:
            options['weights'] = weights

    mod_and_name = estimator_type.split('_')
    mod = sys.modules[mod_and_name[0]]
    klass = getattr(mod, mod_and_name[1])

    if estimator_type.startswith('sklearn'):
        options['n_jobs'] = N_JOBS
        ensemble_estimator = klass(base_estimators, **options)

    elif mod == mlxtend.classifier:
        ensemble_estimator = klass(classifiers=base_estimators,
                                   meta_classifier=meta_estimator,
                                   **options)

    else:
        ensemble_estimator = klass(regressors=base_estimators,
                                   meta_regressor=meta_estimator,
                                   **options)

    print(ensemble_estimator)
    for base_est in base_estimators:
        print(base_est)

    dump_model_to_h5(ensemble_estimator, output_obj)
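
The sys.modules / getattr pair above resolves an estimator_type string into a class at runtime. A minimal self-contained sketch of the same lookup, using a standard-library module so it runs anywhere; the 'json.decoder_JSONDecoder' value is purely illustrative:

import json.decoder  # stand-in for an already-imported estimator module
import sys

estimator_type = 'json.decoder_JSONDecoder'  # '<module path>_<class name>'
mod_and_name = estimator_type.split('_')
mod = sys.modules[mod_and_name[0]]     # sys.modules keys are dotted paths
klass = getattr(mod, mod_and_name[1])  # the class inside that module
print(klass)  # <class 'json.decoder.JSONDecoder'>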
Example #3
def main(inputs_path,
         output_obj,
         base_paths=None,
         meta_path=None,
         outfile_params=None):
    """
    Parameters
    ----------
    inputs_path : str
        File path for Galaxy parameters

    output_obj : str
        File path for ensemble estimator output

    base_paths : str
        File path, or multiple paths concatenated by commas.

    meta_path : str
        File path to the meta estimator

    outfile_params : str
        File path for params output
    """
    with open(inputs_path, 'r') as param_handler:
        params = json.load(param_handler)

    estimator_type = params['algo_selection']['estimator_type']
    # get base estimators
    base_estimators = []
    for idx, base_file in enumerate(base_paths.split(',')):
        if base_file and base_file != 'None':
            with open(base_file, 'rb') as handler:
                model = load_model(handler)
        else:
            estimator_json = (
                params['base_est_builder'][idx]['estimator_selector'])
            model = get_estimator(estimator_json)

        if estimator_type.startswith('sklearn'):
            named = model.__class__.__name__.lower()
            named = 'base_%d_%s' % (idx, named)
            base_estimators.append((named, model))
        else:
            base_estimators.append(model)

    # get meta estimator, if applicable
    if estimator_type.startswith('mlxtend'):
        if meta_path:
            with open(meta_path, 'rb') as f:
                meta_estimator = load_model(f)
        else:
            estimator_json = (params['algo_selection']['meta_estimator']
                              ['estimator_selector'])
            meta_estimator = get_estimator(estimator_json)

    options = params['algo_selection']['options']

    cv_selector = options.pop('cv_selector', None)
    if cv_selector:
        splitter, groups = get_cv(cv_selector)
        options['cv'] = splitter
        # set n_jobs
        options['n_jobs'] = N_JOBS

    weights = options.pop('weights', None)
    if weights:
        weights = ast.literal_eval(weights)
        if weights:
            options['weights'] = weights

    mod_and_name = estimator_type.split('_')
    mod = sys.modules[mod_and_name[0]]
    klass = getattr(mod, mod_and_name[1])

    if estimator_type.startswith('sklearn'):
        options['n_jobs'] = N_JOBS
        ensemble_estimator = klass(base_estimators, **options)

    elif mod == mlxtend.classifier:
        ensemble_estimator = klass(classifiers=base_estimators,
                                   meta_classifier=meta_estimator,
                                   **options)

    else:
        ensemble_estimator = klass(regressors=base_estimators,
                                   meta_regressor=meta_estimator,
                                   **options)

    print(ensemble_estimator)
    for base_est in base_estimators:
        print(base_est)

    with open(output_obj, 'wb') as out_handler:
        pickle.dump(ensemble_estimator, out_handler, pickle.HIGHEST_PROTOCOL)

    if params['get_params'] and outfile_params:
        results = get_search_params(ensemble_estimator)
        df = pd.DataFrame(results, columns=['', 'Parameter', 'Value'])
        df.to_csv(outfile_params, sep='\t', index=False)
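
Both outputs above are plain writes: a highest-protocol pickle of the estimator and a TSV of its parameters. A small self-contained round-trip sketch of the pickle part, with a stand-in payload instead of a real ensemble:

import pickle
import tempfile

obj = {'weights': [0.4, 0.6], 'cv': 5}  # stand-in for an ensemble estimator
with tempfile.NamedTemporaryFile(suffix='.pkl', delete=False) as out_handler:
    pickle.dump(obj, out_handler, pickle.HIGHEST_PROTOCOL)
    path = out_handler.name
with open(path, 'rb') as in_handler:
    assert pickle.load(in_handler) == obj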
Example #4
def _get_single_cv_split(params,
                         array,
                         infile_labels=None,
                         infile_groups=None):
    """ output (train, test) subset from a cv splitter

    Parameters
    ----------
    params : dict
        Galaxy tool inputs
    array : pandas DataFrame object
        The target dataset to split
    infile_labels : str
        File path to dataset containing target values
    infile_groups : str
        File path to dataset containing group values
    """
    y = None
    groups = None

    nth_split = params['mode_selection']['nth_split']

    # read groups
    if infile_groups:
        header = 'infer' if (params['mode_selection']['cv_selector']
                             ['groups_selector']['header_g']) else None
        column_option = (
            params['mode_selection']['cv_selector']['groups_selector']
            ['column_selector_options_g']['selected_column_selector_option_g'])
        if column_option in [
                'by_index_number', 'all_but_by_index_number', 'by_header_name',
                'all_but_by_header_name'
        ]:
            c = (params['mode_selection']['cv_selector']['groups_selector']
                 ['column_selector_options_g']['col_g'])
        else:
            c = None

        groups = read_columns(infile_groups,
                              c=c,
                              c_option=column_option,
                              sep='\t',
                              header=header,
                              parse_dates=True)
        groups = groups.ravel()

        params['mode_selection']['cv_selector']['groups_selector'] = groups

    # read labels
    if infile_labels:
        target_input = (
            params['mode_selection']['cv_selector'].pop('target_input'))
        header = 'infer' if target_input['header1'] else None
        col_index = target_input['col'][0] - 1
        df = pd.read_csv(infile_labels,
                         sep='\t',
                         header=header,
                         parse_dates=True)
        y = df.iloc[:, col_index].values

    # construct the cv splitter object
    splitter, groups = get_cv(params['mode_selection']['cv_selector'])

    total_n_splits = splitter.get_n_splits(array.values, y=y, groups=groups)
    if nth_split > total_n_splits:
        raise ValueError("Total number of splits is {}, but got `nth_split` "
                         "= {}".format(total_n_splits, nth_split))

    # nth_split is 1-based
    for i, (train_index, test_index) in enumerate(
            splitter.split(array.values, y=y, groups=groups), start=1):
        if i == nth_split:
            break

    train = array.iloc[train_index, :]
    test = array.iloc[test_index, :]

    return train, test
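
A self-contained sketch of the nth-split selection above, with scikit-learn's KFold and toy data standing in for the Galaxy inputs:

import pandas as pd
from sklearn.model_selection import KFold

array = pd.DataFrame({'a': range(10), 'b': range(10, 20)})
splitter = KFold(n_splits=5)
nth_split = 3  # 1-based, as in _get_single_cv_split

for i, (train_index, test_index) in enumerate(
        splitter.split(array.values), start=1):
    if i == nth_split:
        break

train = array.iloc[train_index, :]
test = array.iloc[test_index, :]
print(test.index.tolist())  # rows held out in the 3rd fold: [4, 5]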
Example #5
def main(inputs, infile_estimator, infile1, infile2,
         outfile_result, outfile_object=None,
         outfile_weights=None, groups=None,
         ref_seq=None, intervals=None, targets=None,
         fasta_path=None):
    """
    Parameters
    ----------
    inputs : str
        File path to Galaxy tool parameters

    infile_estimator : str
        File path to estimator

    infile1 : str
        File path to dataset containing features

    infile2 : str
        File path to dataset containing target values

    outfile_result : str
        File path to save the results, either cv_results or test result

    outfile_object : str, optional
        File path to save searchCV object

    outfile_weights : str, optional
        File path to save model weights

    groups : str
        File path to dataset containing group labels

    ref_seq : str
        File path to dataset containing genome sequence file

    intervals : str
        File path to dataset containing interval file

    targets : str
        File path to dataset containing compressed target bed file

    fasta_path : str
        File path to dataset containing fasta file
    """
    warnings.simplefilter('ignore')

    with open(inputs, 'r') as param_handler:
        params = json.load(param_handler)

    params_builder = params['search_schemes']['search_params_builder']

    with open(infile_estimator, 'rb') as estimator_handler:
        estimator = load_model(estimator_handler)
    estimator_params = estimator.get_params()

    # cache already-read dataframe objects
    loaded_df = {}

    input_type = params['input_options']['selected_input']
    # tabular input
    if input_type == 'tabular':
        header = 'infer' if params['input_options']['header1'] else None
        column_option = (params['input_options']['column_selector_options_1']
                         ['selected_column_selector_option'])
        if column_option in ['by_index_number', 'all_but_by_index_number',
                             'by_header_name', 'all_but_by_header_name']:
            c = params['input_options']['column_selector_options_1']['col1']
        else:
            c = None

        df_key = infile1 + repr(header)
        df = pd.read_csv(infile1, sep='\t', header=header,
                         parse_dates=True)
        loaded_df[df_key] = df

        X = read_columns(df, c=c, c_option=column_option).astype(float)
    # sparse input
    elif input_type == 'sparse':
        with open(infile1, 'r') as sparse_handler:
            X = mmread(sparse_handler)

    # fasta_file input
    elif input_type == 'seq_fasta':
        pyfaidx = get_module('pyfaidx')
        sequences = pyfaidx.Fasta(fasta_path)
        n_seqs = len(sequences.keys())
        X = np.arange(n_seqs)[:, np.newaxis]
        for param in estimator_params.keys():
            if param.endswith('fasta_path'):
                estimator.set_params(
                    **{param: fasta_path})
                break
        else:
            raise ValueError(
                "The selected estimator doesn't support "
                "fasta file input! Please consider using "
                "KerasGBatchClassifier with "
                "FastaDNABatchGenerator/FastaProteinBatchGenerator "
                "or having GenomeOneHotEncoder/ProteinOneHotEncoder "
                "in pipeline!")

    elif input_type == 'refseq_and_interval':
        path_params = {
            'data_batch_generator__ref_genome_path': ref_seq,
            'data_batch_generator__intervals_path': intervals,
            'data_batch_generator__target_path': targets
        }
        estimator.set_params(**path_params)
        with open(intervals) as interval_handler:
            n_intervals = sum(1 for line in interval_handler)
        X = np.arange(n_intervals)[:, np.newaxis]

    # Get target y
    header = 'infer' if params['input_options']['header2'] else None
    column_option = (params['input_options']['column_selector_options_2']
                     ['selected_column_selector_option2'])
    if column_option in ['by_index_number', 'all_but_by_index_number',
                         'by_header_name', 'all_but_by_header_name']:
        c = params['input_options']['column_selector_options_2']['col2']
    else:
        c = None

    df_key = infile2 + repr(header)
    if df_key in loaded_df:
        infile2 = loaded_df[df_key]
    else:
        infile2 = pd.read_csv(infile2, sep='\t',
                              header=header, parse_dates=True)
        loaded_df[df_key] = infile2

    y = read_columns(
            infile2,
            c=c,
            c_option=column_option,
            sep='\t',
            header=header,
            parse_dates=True)
    if len(y.shape) == 2 and y.shape[1] == 1:
        y = y.ravel()
    if input_type == 'refseq_and_interval':
        estimator.set_params(
            data_batch_generator__features=y.ravel().tolist())
        y = None
    # end y

    optimizer = params['search_schemes']['selected_search_scheme']
    optimizer = getattr(model_selection, optimizer)

    # handle gridsearchcv options
    options = params['search_schemes']['options']

    if groups:
        header = 'infer' if (options['cv_selector']['groups_selector']
                                    ['header_g']) else None
        column_option = (options['cv_selector']['groups_selector']
                                ['column_selector_options_g']
                                ['selected_column_selector_option_g'])
        if column_option in ['by_index_number', 'all_but_by_index_number',
                             'by_header_name', 'all_but_by_header_name']:
            c = (options['cv_selector']['groups_selector']
                        ['column_selector_options_g']['col_g'])
        else:
            c = None

        df_key = groups + repr(header)
        if df_key in loaded_df:
            groups = loaded_df[df_key]

        groups = read_columns(
                groups,
                c=c,
                c_option=column_option,
                sep='\t',
                header=header,
                parse_dates=True)
        groups = groups.ravel()
        options['cv_selector']['groups_selector'] = groups

    splitter, groups = get_cv(options.pop('cv_selector'))
    options['cv'] = splitter
    options['n_jobs'] = N_JOBS
    primary_scoring = options['scoring']['primary_scoring']
    options['scoring'] = get_scoring(options['scoring'])
    if options['error_score']:
        options['error_score'] = 'raise'
    else:
        options['error_score'] = np.nan
    if options['refit'] and isinstance(options['scoring'], dict):
        options['refit'] = primary_scoring
    if 'pre_dispatch' in options and options['pre_dispatch'] == '':
        options['pre_dispatch'] = None

    # free the dataframe cache
    del loaded_df

    # handle memory
    memory = joblib.Memory(location=CACHE_DIR, verbose=0)
    # caching iraps_core fits could increase search speed significantly
    if estimator.__class__.__name__ == 'IRAPSClassifier':
        estimator.set_params(memory=memory)
    else:
        # For iraps buried in pipeline
        for p, v in estimator_params.items():
            if p.endswith('memory'):
                # for case of `__irapsclassifier__memory`
                if len(p) > 8 and p[:-8].endswith('irapsclassifier'):
                    # caching iraps_core fits could increase search
                    # speed significantly
                    new_params = {p: memory}
                    estimator.set_params(**new_params)
                # for security reasons, we don't want memory being
                # modified unexpectedly
                elif v:
                    new_params = {p: None}
                    estimator.set_params(**new_params)
            # For now, 1 CPU is suggested for irapsclassifier
            elif p.endswith('n_jobs'):
                new_params = {p: 1}
                estimator.set_params(**new_params)
            # for security reasons, the types of callbacks are limited
            elif p.endswith('callbacks'):
                for cb in v:
                    cb_type = cb['callback_selection']['callback_type']
                    if cb_type not in ALLOWED_CALLBACKS:
                        raise ValueError(
                            "Prohibited callback type: %s!" % cb_type)

    param_grid = _eval_search_params(params_builder)
    searcher = optimizer(estimator, param_grid, **options)

    # do nested split
    split_mode = params['outer_split'].pop('split_mode')
    # nested CV, outer cv using cross_validate
    if split_mode == 'nested_cv':
        outer_cv, _ = get_cv(params['outer_split']['cv_selector'])

        if options['error_score'] == 'raise':
            rval = cross_validate(
                searcher, X, y, scoring=options['scoring'],
                cv=outer_cv, n_jobs=N_JOBS, verbose=0,
                error_score=options['error_score'])
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    rval = cross_validate(
                        searcher, X, y,
                        scoring=options['scoring'],
                        cv=outer_cv, n_jobs=N_JOBS,
                        verbose=0,
                        error_score=options['error_score'])
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        keys = list(rval.keys())
        for k in keys:
            if k.startswith('test'):
                rval['mean_' + k] = np.mean(rval[k])
                rval['std_' + k] = np.std(rval[k])
            if k.endswith('time'):
                rval.pop(k)
        rval = pd.DataFrame(rval)
        rval = rval[sorted(rval.columns)]
        rval.to_csv(path_or_buf=outfile_result, sep='\t',
                    header=True, index=False)
    else:
        if split_mode == 'train_test_split':
            train_test_split = try_get_attr(
                'galaxy_ml.model_validations', 'train_test_split')
            # make sure refit is chosen
            # this could be True for sklearn models, but not the case for
            # deep learning models
            if not options['refit'] and \
                    not all(hasattr(estimator, attr)
                            for attr in ('config', 'model_type')):
                warnings.warn("Refit is change to `True` for nested "
                              "validation!")
                setattr(searcher, 'refit', True)
            split_options = params['outer_split']

            # splits
            if split_options['shuffle'] == 'stratified':
                split_options['labels'] = y
                X, X_test, y, y_test = train_test_split(X, y, **split_options)
            elif split_options['shuffle'] == 'group':
                if groups is None:
                    raise ValueError("No group based CV option was "
                                     "choosen for group shuffle!")
                split_options['labels'] = groups
                if y is None:
                    X, X_test, groups, _ =\
                        train_test_split(X, groups, **split_options)
                else:
                    X, X_test, y, y_test, groups, _ =\
                        train_test_split(X, y, groups, **split_options)
            else:
                if split_options['shuffle'] == 'None':
                    split_options['shuffle'] = None
                X, X_test, y, y_test =\
                    train_test_split(X, y, **split_options)
        # end train_test_split

        # shared by both train_test_split and non-split
        if options['error_score'] == 'raise':
            searcher.fit(X, y, groups=groups)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    searcher.fit(X, y, groups=groups)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        # no outer split
        if split_mode == 'no':
            # save results
            cv_results = pd.DataFrame(searcher.cv_results_)
            cv_results = cv_results[sorted(cv_results.columns)]
            cv_results.to_csv(path_or_buf=outfile_result, sep='\t',
                              header=True, index=False)

        # train_test_split, output test result using best_estimator_
        # or rebuild the trained estimator using weights if applicable.
        else:
            scorer_ = searcher.scorer_
            # collections.Mapping was removed in Python 3.10; the abc
            # module (import collections.abc) provides Mapping now
            if isinstance(scorer_, collections.abc.Mapping):
                is_multimetric = True
            else:
                is_multimetric = False

            best_estimator_ = getattr(searcher, 'best_estimator_', None)
            if not best_estimator_:
                raise ValueError("GridSearchCV object has no "
                                 "`best_estimator_` when `refit`=False!")

            if best_estimator_.__class__.__name__ == 'KerasGBatchClassifier' \
                    and hasattr(estimator.data_batch_generator, 'target_path'):
                test_score = best_estimator_.evaluate(
                    X_test, scorer=scorer_, is_multimetric=is_multimetric)
            else:
                test_score = _score(best_estimator_, X_test,
                                    y_test, scorer_,
                                    is_multimetric=is_multimetric)

            if not is_multimetric:
                test_score = {primary_scoring: test_score}
            for key, value in test_score.items():
                test_score[key] = [value]
            result_df = pd.DataFrame(test_score)
            result_df.to_csv(path_or_buf=outfile_result, sep='\t',
                             header=True, index=False)

    memory.clear(warn=False)

    if outfile_object:
        best_estimator_ = getattr(searcher, 'best_estimator_', None)
        if not best_estimator_:
            warnings.warn("GridSearchCV object has no attribute "
                          "'best_estimator_', because either it's "
                          "nested gridsearch or `refit` is False!")
            return

        main_est = best_estimator_
        if isinstance(best_estimator_, pipeline.Pipeline):
            main_est = best_estimator_.steps[-1][-1]

        if hasattr(main_est, 'model_') \
                and hasattr(main_est, 'save_weights'):
            if outfile_weights:
                main_est.save_weights(outfile_weights)
            del main_est.model_
            del main_est.fit_params
            del main_est.model_class_
            del main_est.validation_data
            if getattr(main_est, 'data_generator_', None):
                del main_est.data_generator_
                del main_est.data_batch_generator

        with open(outfile_object, 'wb') as output_handler:
            pickle.dump(best_estimator_, output_handler,
                        pickle.HIGHEST_PROTOCOL)
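
Every error_score branch in these scripts uses the same idiom: record FitFailedWarning instead of raising, then print what was captured. A minimal runnable sketch of the idiom, with a plain UserWarning standing in for FitFailedWarning:

import warnings

def flaky_fit():
    # stand-in for a cross-validation fit that fails softly
    warnings.warn('one fit failed; its score is set to NaN', UserWarning)

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter('always', UserWarning)
    try:
        flaky_fit()
    except ValueError:
        pass  # hard failures are swallowed; recorded warnings survive
    for warning in w:
        print(repr(warning.message))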
Example #6
def main(
    inputs,
    infile_estimator,
    infile1,
    infile2,
    outfile_result,
    outfile_object=None,
    outfile_weights=None,
    groups=None,
    ref_seq=None,
    intervals=None,
    targets=None,
    fasta_path=None,
):
    """
    Parameters
    ----------
    inputs : str
        File path to Galaxy tool parameters

    infile_estimator : str
        File path to estimator

    infile1 : str
        File path to dataset containing features

    infile2 : str
        File path to dataset containing target values

    outfile_result : str
        File path to save the results, either cv_results or test result

    outfile_object : str, optional
        File path to save searchCV object

    outfile_weights : str, optional
        File path to save model weights

    groups : str
        File path to dataset containing group labels

    ref_seq : str
        File path to dataset containing genome sequence file

    intervals : str
        File path to dataset containing interval file

    targets : str
        File path to dataset containing compressed target bed file

    fasta_path : str
        File path to dataset containing fasta file
    """
    warnings.simplefilter("ignore")

    # cache already-read dataframe objects
    loaded_df = {}

    with open(inputs, "r") as param_handler:
        params = json.load(param_handler)

    # Override the refit parameter
    params["search_schemes"]["options"]["refit"] = (
        True if params["save"] != "nope" else False
    )

    with open(infile_estimator, "rb") as estimator_handler:
        estimator = load_model(estimator_handler)

    optimizer = params["search_schemes"]["selected_search_scheme"]
    optimizer = getattr(model_selection, optimizer)

    # handle gridsearchcv options
    options = params["search_schemes"]["options"]

    if groups:
        header = (
            "infer" if (options["cv_selector"]["groups_selector"]["header_g"]) else None
        )
        column_option = options["cv_selector"]["groups_selector"][
            "column_selector_options_g"
        ]["selected_column_selector_option_g"]
        if column_option in [
            "by_index_number",
            "all_but_by_index_number",
            "by_header_name",
            "all_but_by_header_name",
        ]:
            c = options["cv_selector"]["groups_selector"]["column_selector_options_g"][
                "col_g"
            ]
        else:
            c = None

        df_key = groups + repr(header)

        groups = pd.read_csv(groups, sep="\t", header=header, parse_dates=True)
        loaded_df[df_key] = groups

        groups = read_columns(
            groups,
            c=c,
            c_option=column_option,
            sep="\t",
            header=header,
            parse_dates=True,
        )
        groups = groups.ravel()
        options["cv_selector"]["groups_selector"] = groups

    splitter, groups = get_cv(options.pop("cv_selector"))
    options["cv"] = splitter
    primary_scoring = options["scoring"]["primary_scoring"]
    # get_scoring() expects secondary_scoring to be a comma-separated
    # string (not a list), so check whether secondary_scoring is specified
    secondary_scoring = options["scoring"].get("secondary_scoring", None)
    if secondary_scoring is not None:
        # if specified, convert the list into a comma-separated string
        options["scoring"]["secondary_scoring"] = ",".join(
            options["scoring"]["secondary_scoring"]
        )
    options["scoring"] = get_scoring(options["scoring"])
    if options["error_score"]:
        options["error_score"] = "raise"
    else:
        options["error_score"] = np.nan
    if options["refit"] and isinstance(options["scoring"], dict):
        options["refit"] = primary_scoring
    if "pre_dispatch" in options and options["pre_dispatch"] == "":
        options["pre_dispatch"] = None

    params_builder = params["search_schemes"]["search_params_builder"]
    param_grid = _eval_search_params(params_builder)

    estimator = clean_params(estimator)

    # save the SearchCV object without fit
    if params["save"] == "save_no_fit":
        searcher = optimizer(estimator, param_grid, **options)
        print(searcher)
        with open(outfile_object, "wb") as output_handler:
            pickle.dump(searcher, output_handler, pickle.HIGHEST_PROTOCOL)
        return 0

    # read inputs and load new attributes, like paths
    estimator, X, y = _handle_X_y(
        estimator,
        params,
        infile1,
        infile2,
        loaded_df=loaded_df,
        ref_seq=ref_seq,
        intervals=intervals,
        targets=targets,
        fasta_path=fasta_path,
    )

    # caching iraps_core fits could increase search speed significantly
    memory = joblib.Memory(location=CACHE_DIR, verbose=0)
    main_est = get_main_estimator(estimator)
    if main_est.__class__.__name__ == "IRAPSClassifier":
        main_est.set_params(memory=memory)

    searcher = optimizer(estimator, param_grid, **options)

    split_mode = params["outer_split"].pop("split_mode")

    if split_mode == "nested_cv":
        # make sure refit is chosen
        # this could be True for sklearn models, but not the case for
        # deep learning models
        if not options["refit"] and not all(
            hasattr(estimator, attr) for attr in ("config", "model_type")
        ):
            warnings.warn("Refit is change to `True` for nested validation!")
            setattr(searcher, "refit", True)

        outer_cv, _ = get_cv(params["outer_split"]["cv_selector"])
        # nested CV, outer cv using cross_validate
        if options["error_score"] == "raise":
            rval = cross_validate(
                searcher,
                X,
                y,
                scoring=options["scoring"],
                cv=outer_cv,
                n_jobs=N_JOBS,
                verbose=options["verbose"],
                return_estimator=(params["save"] == "save_estimator"),
                error_score=options["error_score"],
                return_train_score=True,
            )
        else:
            warnings.simplefilter("always", FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    rval = cross_validate(
                        searcher,
                        X,
                        y,
                        scoring=options["scoring"],
                        cv=outer_cv,
                        n_jobs=N_JOBS,
                        verbose=options["verbose"],
                        return_estimator=(params["save"] == "save_estimator"),
                        error_score=options["error_score"],
                        return_train_score=True,
                    )
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        fitted_searchers = rval.pop("estimator", [])
        if fitted_searchers:
            import os

            pwd = os.getcwd()
            save_dir = os.path.join(pwd, "cv_results_in_folds")
            try:
                os.mkdir(save_dir)
                for idx, obj in enumerate(fitted_searchers):
                    target_name = "cv_results_" + "_" + "split%d" % idx
                    target_path = os.path.join(pwd, save_dir, target_name)
                    cv_results_ = getattr(obj, "cv_results_", None)
                    if not cv_results_:
                        print("%s is not available" % target_name)
                        continue
                    cv_results_ = pd.DataFrame(cv_results_)
                    cv_results_ = cv_results_[sorted(cv_results_.columns)]
                    cv_results_.to_csv(target_path, sep="\t", header=True, index=False)
            except Exception as e:
                print(e)
            finally:
                del os

        keys = list(rval.keys())
        for k in keys:
            if k.startswith("test"):
                rval["mean_" + k] = np.mean(rval[k])
                rval["std_" + k] = np.std(rval[k])
            if k.endswith("time"):
                rval.pop(k)
        rval = pd.DataFrame(rval)
        rval = rval[sorted(rval.columns)]
        rval.to_csv(path_or_buf=outfile_result, sep="\t", header=True, index=False)
        # deprecate train test split mode
        """searcher = _do_train_test_split_val(
            searcher, X, y, params,
            primary_scoring=primary_scoring,
            error_score=options['error_score'],
            groups=groups,
            outfile=outfile_result)"""
        return 0

    # no outer split
    else:
        searcher.set_params(n_jobs=N_JOBS)
        if options["error_score"] == "raise":
            searcher.fit(X, y, groups=groups)
        else:
            warnings.simplefilter("always", FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    searcher.fit(X, y, groups=groups)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        cv_results = pd.DataFrame(searcher.cv_results_)
        cv_results = cv_results[sorted(cv_results.columns)]
        cv_results.to_csv(
            path_or_buf=outfile_result, sep="\t", header=True, index=False
        )

    memory.clear(warn=False)

    # output best estimator, and weights if applicable
    if outfile_object:
        best_estimator_ = getattr(searcher, "best_estimator_", None)
        if not best_estimator_:
            warnings.warn(
                "GridSearchCV object has no attribute "
                "'best_estimator_', because either it's "
                "nested gridsearch or `refit` is False!"
            )
            return

        # clean params
        best_estimator_ = clean_params(best_estimator_)

        main_est = get_main_estimator(best_estimator_)

        if hasattr(main_est, "model_") and hasattr(main_est, "save_weights"):
            if outfile_weights:
                main_est.save_weights(outfile_weights)
            del main_est.model_
            del main_est.fit_params
            del main_est.model_class_
            del main_est.validation_data
            if getattr(main_est, "data_generator_", None):
                del main_est.data_generator_

        with open(outfile_object, "wb") as output_handler:
            print("Best estimator is saved: %s " % repr(best_estimator_))
            pickle.dump(best_estimator_, output_handler, pickle.HIGHEST_PROTOCOL)
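
The primary/secondary scoring bookkeeping above reduces to a scikit-learn rule: when scoring is a dict of scorers, refit must name exactly one of them. A small runnable sketch:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

X, y = make_classification(n_samples=60, random_state=0)
scoring = {"accuracy": "accuracy", "f1": "f1"}  # multi-metric dict
searcher = GridSearchCV(
    LogisticRegression(max_iter=200),
    param_grid={"C": [0.1, 1.0]},
    scoring=scoring,
    refit="accuracy",  # mirrors options["refit"] = primary_scoring above
    cv=3,
)
searcher.fit(X, y)
print(searcher.best_params_)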
Example #7
def main(inputs_path,
         output_obj,
         base_paths=None,
         meta_path=None,
         outfile_params=None):
    """
    Parameters
    ----------
    inputs_path : str
        File path for Galaxy parameters

    output_obj : str
        File path for ensemble estimator output

    base_paths : str
        File path, or multiple paths concatenated by commas.

    meta_path : str
        File path to the meta estimator

    outfile_params : str
        File path for params output
    """
    with open(inputs_path, "r") as param_handler:
        params = json.load(param_handler)

    estimator_type = params["algo_selection"]["estimator_type"]
    # get base estimators
    base_estimators = []
    for idx, base_file in enumerate(base_paths.split(",")):
        if base_file and base_file != "None":
            with open(base_file, "rb") as handler:
                model = load_model(handler)
        else:
            estimator_json = params["base_est_builder"][idx][
                "estimator_selector"]
            model = get_estimator(estimator_json)

        if estimator_type.startswith("sklearn"):
            named = model.__class__.__name__.lower()
            named = "base_%d_%s" % (idx, named)
            base_estimators.append((named, model))
        else:
            base_estimators.append(model)

    # get meta estimator, if applicable
    if estimator_type.startswith("mlxtend"):
        if meta_path:
            with open(meta_path, "rb") as f:
                meta_estimator = load_model(f)
        else:
            estimator_json = params["algo_selection"]["meta_estimator"][
                "estimator_selector"]
            meta_estimator = get_estimator(estimator_json)

    options = params["algo_selection"]["options"]

    cv_selector = options.pop("cv_selector", None)
    if cv_selector:
        splitter, _groups = get_cv(cv_selector)
        options["cv"] = splitter
        # set n_jobs
        options["n_jobs"] = N_JOBS

    weights = options.pop("weights", None)
    if weights:
        weights = ast.literal_eval(weights)
        if weights:
            options["weights"] = weights

    mod_and_name = estimator_type.split("_")
    mod = sys.modules[mod_and_name[0]]
    klass = getattr(mod, mod_and_name[1])

    if estimator_type.startswith("sklearn"):
        options["n_jobs"] = N_JOBS
        ensemble_estimator = klass(base_estimators, **options)

    elif mod == mlxtend.classifier:
        ensemble_estimator = klass(classifiers=base_estimators,
                                   meta_classifier=meta_estimator,
                                   **options)

    else:
        ensemble_estimator = klass(regressors=base_estimators,
                                   meta_regressor=meta_estimator,
                                   **options)

    print(ensemble_estimator)
    for base_est in base_estimators:
        print(base_est)

    with open(output_obj, "wb") as out_handler:
        pickle.dump(ensemble_estimator, out_handler, pickle.HIGHEST_PROTOCOL)

    if params["get_params"] and outfile_params:
        results = get_search_params(ensemble_estimator)
        df = pd.DataFrame(results, columns=["", "Parameter", "Value"])
        df.to_csv(outfile_params, sep="\t", index=False)
Example #8
def main(inputs,
         infile_estimator,
         infile1,
         infile2,
         outfile_result,
         outfile_object=None,
         groups=None,
         ref_seq=None,
         intervals=None,
         targets=None,
         fasta_path=None):
    """
    Parameters
    ----------
    inputs : str
        File path to Galaxy tool parameters.

    infile_estimator : str
        File path to estimator.

    infile1 : str
        File path to dataset containing features

    infile2 : str
        File path to dataset containing target values

    outfile_result : str
        File path to save the results, either cv_results or test result

    outfile_object : str, optional
        File path to save searchCV object

    groups : str
        File path to dataset containing group labels

    ref_seq : str
        File path to dataset containing genome sequence file

    intervals : str
        File path to dataset containing interval file

    targets : str
        File path to dataset containing compressed target bed file

    fasta_path : str
        File path to dataset containing fasta file
    """
    warnings.simplefilter('ignore')

    # cache already-read dataframe objects
    loaded_df = {}

    with open(inputs, 'r') as param_handler:
        params = json.load(param_handler)

    # Override the refit parameter
    params['search_schemes']['options']['refit'] = True \
        if (params['save'] != 'nope' or
            params['outer_split']['split_mode'] == 'nested_cv') else False

    estimator = load_model_from_h5(infile_estimator)

    estimator = clean_params(estimator)

    if estimator.__class__.__name__ == 'KerasGBatchClassifier':
        _fit_and_score = try_get_attr('galaxy_ml.model_validations',
                                      '_fit_and_score')

        setattr(_search, '_fit_and_score', _fit_and_score)
        setattr(_validation, '_fit_and_score', _fit_and_score)

    optimizer = params['search_schemes']['selected_search_scheme']
    if optimizer == 'skopt.BayesSearchCV':
        optimizer = BayesSearchCV
    else:
        optimizer = getattr(model_selection, optimizer)

    # handle gridsearchcv options
    options = params['search_schemes']['options']

    if groups:
        header = 'infer' if (
            options['cv_selector']['groups_selector']['header_g']) else None
        column_option = (
            options['cv_selector']['groups_selector']
            ['column_selector_options_g']['selected_column_selector_option_g'])
        if column_option in [
                'by_index_number', 'all_but_by_index_number', 'by_header_name',
                'all_but_by_header_name'
        ]:
            c = (options['cv_selector']['groups_selector']
                 ['column_selector_options_g']['col_g'])
        else:
            c = None

        df_key = groups + repr(header)

        groups = pd.read_csv(groups, sep='\t', header=header, parse_dates=True)
        loaded_df[df_key] = groups

        groups = read_columns(groups,
                              c=c,
                              c_option=column_option,
                              sep='\t',
                              header=header,
                              parse_dates=True)
        groups = groups.ravel()
        options['cv_selector']['groups_selector'] = groups

    cv_selector = options.pop('cv_selector')
    if Version(galaxy_ml_version) < Version('0.8.3'):
        cv_selector.pop('n_stratification_bins', None)
    splitter, groups = get_cv(cv_selector)
    options['cv'] = splitter
    primary_scoring = options['scoring']['primary_scoring']
    options['scoring'] = get_scoring(options['scoring'])
    # TODO make BayesSearchCV support multiple scoring
    # `optimizer` is already a class at this point, so compare against
    # the class rather than the original string
    if optimizer is BayesSearchCV and \
            isinstance(options['scoring'], dict):
        options['scoring'] = options['scoring'][primary_scoring]
        warnings.warn("BayesSearchCV doesn't support multiple "
                      "scorings! Primary scoring is used.")
    if options['error_score']:
        options['error_score'] = 'raise'
    else:
        options['error_score'] = np.nan
    if options['refit'] and isinstance(options['scoring'], dict):
        options['refit'] = primary_scoring
    if 'pre_dispatch' in options and options['pre_dispatch'] == '':
        options['pre_dispatch'] = None

    params_builder = params['search_schemes']['search_params_builder']
    param_grid = _eval_search_params(params_builder)

    # save the SearchCV object without fit
    if params['save'] == 'save_no_fit':
        searcher = optimizer(estimator, param_grid, **options)
        dump_model_to_h5(searcher, outfile_object)
        return 0

    # read inputs and load new attributes, like paths
    estimator, X, y = _handle_X_y(estimator,
                                  params,
                                  infile1,
                                  infile2,
                                  loaded_df=loaded_df,
                                  ref_seq=ref_seq,
                                  intervals=intervals,
                                  targets=targets,
                                  fasta_path=fasta_path)

    # caching iraps_core fits could increase search speed significantly
    memory = joblib.Memory(location=CACHE_DIR, verbose=0)
    estimator = _set_memory(estimator, memory)

    searcher = optimizer(estimator, param_grid, **options)

    split_mode = params['outer_split'].pop('split_mode')

    # Nested CV
    if split_mode == 'nested_cv':
        cv_selector = params['outer_split']['cv_selector']
        if Version(galaxy_ml_version) < Version('0.8.3'):
            cv_selector.pop('n_stratification_bins', None)
        outer_cv, _ = get_cv(cv_selector)
        # nested CV, outer cv using cross_validate
        if options['error_score'] == 'raise':
            rval = cross_validate(
                searcher,
                X,
                y,
                groups=groups,
                scoring=options['scoring'],
                cv=outer_cv,
                n_jobs=N_JOBS,
                verbose=options['verbose'],
                fit_params={'groups': groups},
                return_estimator=(params['save'] == 'save_estimator'),
                error_score=options['error_score'],
                return_train_score=True)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    rval = cross_validate(
                        searcher,
                        X,
                        y,
                        groups=groups,
                        scoring=options['scoring'],
                        cv=outer_cv,
                        n_jobs=N_JOBS,
                        verbose=options['verbose'],
                        fit_params={'groups': groups},
                        return_estimator=(params['save'] == 'save_estimator'),
                        error_score=options['error_score'],
                        return_train_score=True)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        fitted_searchers = rval.pop('estimator', [])
        if fitted_searchers:
            import os
            pwd = os.getcwd()
            save_dir = os.path.join(pwd, 'cv_results_in_folds')
            try:
                os.mkdir(save_dir)
                for idx, obj in enumerate(fitted_searchers):
                    target_name = 'cv_results__split%d' % idx
                    target_path = os.path.join(pwd, save_dir, target_name)
                    cv_results_ = getattr(obj, 'cv_results_', None)
                    if not cv_results_:
                        print("%s is not available" % target_name)
                        continue
                    cv_results_ = pd.DataFrame(cv_results_)
                    cv_results_ = cv_results_[sorted(cv_results_.columns)]
                    cv_results_.to_csv(target_path,
                                       sep='\t',
                                       header=True,
                                       index=False)
            except Exception as e:
                print(e)
            finally:
                del os

        keys = list(rval.keys())
        for k in keys:
            if k.startswith('test'):
                rval['mean_' + k] = np.mean(rval[k])
                rval['std_' + k] = np.std(rval[k])
            if k.endswith('time'):
                rval.pop(k)
        rval = pd.DataFrame(rval)
        rval = rval[sorted(rval.columns)]
        rval.to_csv(path_or_buf=outfile_result,
                    sep='\t',
                    header=True,
                    index=False)

        return 0

        # deprecate train test split mode
        """searcher = _do_train_test_split_val(
            searcher, X, y, params,
            primary_scoring=primary_scoring,
            error_score=options['error_score'],
            groups=groups,
            outfile=outfile_result)"""

    # no outer split
    else:
        searcher.set_params(n_jobs=N_JOBS)
        if options['error_score'] == 'raise':
            searcher.fit(X, y, groups=groups)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    searcher.fit(X, y, groups=groups)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        cv_results = pd.DataFrame(searcher.cv_results_)
        cv_results = cv_results[sorted(cv_results.columns)]
        cv_results.to_csv(path_or_buf=outfile_result,
                          sep='\t',
                          header=True,
                          index=False)

    memory.clear(warn=False)

    # output best estimator, and weights if applicable
    if outfile_object:
        best_estimator_ = getattr(searcher, 'best_estimator_', None)
        if not best_estimator_:
            warnings.warn("GridSearchCV object has no attribute "
                          "'best_estimator_', because either it's "
                          "nested gridsearch or `refit` is False!")
            return

        print("Saving best estimator: %s " % repr(best_estimator_))
        dump_model_to_h5(best_estimator_, outfile_object)
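
Example #8 gates options on galaxy_ml_version through Version comparisons. A self-contained sketch of that check, assuming Version comes from the packaging library and using an illustrative version value:

from packaging.version import Version

galaxy_ml_version = '0.8.2'  # illustrative value
cv_selector = {'selected_cv': 'KFold', 'n_stratification_bins': 5}
if Version(galaxy_ml_version) < Version('0.8.3'):
    # older releases don't recognize this key, so drop it
    cv_selector.pop('n_stratification_bins', None)
print(cv_selector)  # {'selected_cv': 'KFold'}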