Example No. 1
def get_batch_generator(config):
    """Construct keras online data generator from Galaxy tool parameters

    Parameters
    -----------
    config : dictionary, galaxy tool parameters loaded by JSON
    """
    generator_type = config.pop('generator_type')
    klass = try_get_attr('galaxy_ml.preprocessors', generator_type)

    if generator_type == 'GenomicIntervalBatchGenerator':
        config['ref_genome_path'] = 'to_be_determined'
        config['intervals_path'] = 'to_be_determined'
        config['target_path'] = 'to_be_determined'
        config['features'] = 'to_be_determined'
    else:
        config['fasta_path'] = 'to_be_determined'

    return klass(**config)
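A minimal sketch of how this factory might be called. The config dict shape and the FastaDNABatchGenerator keyword are assumptions drawn from the branches above, not from a documented interface:

# hypothetical Galaxy tool parameters for a fasta-based generator
config = {
    'generator_type': 'FastaDNABatchGenerator',  # resolved via try_get_attr
    'seq_length': 1000,                          # illustrative value
}
generator = get_batch_generator(config)
# 'fasta_path' was set to the 'to_be_determined' placeholder above;
# the real dataset path is expected to be filled in downstream.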
Example No. 2
def get_batch_generator(config):
    """
    Construct keras online data generator from Galaxy tool parameters

    Parameters
    -----------
    config : dictionary, galaxy tool parameters loaded by JSON
    """
    generator_type = config.pop("generator_type")
    if generator_type == "none":
        return None

    klass = try_get_attr("galaxy_ml.preprocessors", generator_type)

    if generator_type == "GenomicIntervalBatchGenerator":
        config["ref_genome_path"] = "to_be_determined"
        config["intervals_path"] = "to_be_determined"
        config["target_path"] = "to_be_determined"
        config["features"] = "to_be_determined"
    else:
        config["fasta_path"] = "to_be_determined"

    return klass(**config)
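Compared with the first example, this variant short-circuits when the tool requests no generator at all, so a caller can rely on:

assert get_batch_generator({'generator_type': 'none'}) is None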
Example No. 3
def main(inputs,
         infile_estimator,
         outfile_predict,
         infile_weights=None,
         infile1=None,
         fasta_path=None,
         ref_seq=None,
         vcf_path=None):
    """
    Parameters
    ----------
    inputs : str
        File path to galaxy tool parameter

    infile_estimator : str
        File path to trained estimator input

    outfile_predict : str
        File path to save the prediction results, tabular

    infile_weights : str
        File path to weights input

    infile1 : str
        File path to dataset containing features

    fasta_path : str
        File path to dataset containing fasta file

    ref_seq : str
        File path to dataset containing the reference genome sequence.

    vcf_path : str
        File path to dataset containing variants info.
    """
    warnings.filterwarnings('ignore')

    with open(inputs, 'r') as param_handler:
        params = json.load(param_handler)

    # load model
    with open(infile_estimator, 'rb') as est_handler:
        estimator = load_model(est_handler)

    main_est = estimator
    if isinstance(estimator, Pipeline):
        main_est = estimator.steps[-1][-1]
    if hasattr(main_est, 'config') and hasattr(main_est, 'load_weights'):
        if not infile_weights or infile_weights == 'None':
            raise ValueError("The selected model skeleton asks for weights, "
                             "but dataset for weights wan not selected!")
        main_est.load_weights(infile_weights)

    # handle data input
    input_type = params['input_options']['selected_input']
    # tabular input
    if input_type == 'tabular':
        header = 'infer' if params['input_options']['header1'] else None
        column_option = (params['input_options']['column_selector_options_1']
                         ['selected_column_selector_option'])
        if column_option in [
                'by_index_number', 'all_but_by_index_number', 'by_header_name',
                'all_but_by_header_name'
        ]:
            c = params['input_options']['column_selector_options_1']['col1']
        else:
            c = None

        df = pd.read_csv(infile1, sep='\t', header=header, parse_dates=True)

        X = read_columns(df, c=c, c_option=column_option).astype(float)

        if params['method'] == 'predict':
            preds = estimator.predict(X)
        else:
            preds = estimator.predict_proba(X)

    # sparse input
    elif input_type == 'sparse':
        X = mmread(open(infile1, 'r'))
        if params['method'] == 'predict':
            preds = estimator.predict(X)
        else:
            preds = estimator.predict_proba(X)

    # fasta input
    elif input_type == 'seq_fasta':
        if not hasattr(estimator, 'data_batch_generator'):
            raise ValueError("To do prediction on sequences in fasta input, "
                             "the estimator must be a `KerasGBatchClassifier`"
                             "equipped with data_batch_generator!")
        pyfaidx = get_module('pyfaidx')
        sequences = pyfaidx.Fasta(fasta_path)
        n_seqs = len(sequences.keys())
        X = np.arange(n_seqs)[:, np.newaxis]
        seq_length = estimator.data_batch_generator.seq_length
        batch_size = getattr(estimator, 'batch_size', 32)
        steps = (n_seqs + batch_size - 1) // batch_size

        seq_type = params['input_options']['seq_type']
        klass = try_get_attr('galaxy_ml.preprocessors', seq_type)

        pred_data_generator = klass(fasta_path, seq_length=seq_length)

        if params['method'] == 'predict':
            preds = estimator.predict(X,
                                      data_generator=pred_data_generator,
                                      steps=steps)
        else:
            preds = estimator.predict_proba(X,
                                            data_generator=pred_data_generator,
                                            steps=steps)

    # vcf input
    elif input_type == 'variant_effect':
        klass = try_get_attr('galaxy_ml.preprocessors',
                             'GenomicVariantBatchGenerator')

        options = params['input_options']
        options.pop('selected_input')
        if options['blacklist_regions'] == 'none':
            options['blacklist_regions'] = None

        pred_data_generator = klass(ref_genome_path=ref_seq,
                                    vcf_path=vcf_path,
                                    **options)

        pred_data_generator.set_processing_attrs()

        variants = pred_data_generator.variants

        # predict 1600 samples at once, then write to file
        gen_flow = pred_data_generator.flow(batch_size=1600)

        file_writer = open(outfile_predict, 'w')
        header_row = '\t'.join(
            ['chrom', 'pos', 'name', 'ref', 'alt', 'strand'])
        file_writer.write(header_row)
        header_done = False

        steps_done = 0

        # TODO: multiple threading
        try:
            while steps_done < len(gen_flow):
                index_array = next(gen_flow.index_generator)
                batch_X = gen_flow._get_batches_of_transformed_samples(
                    index_array)

                if params['method'] == 'predict':
                    batch_preds = estimator.predict(
                        batch_X,
                        # The presence of `pred_data_generator` below is to
                        # override model carrying data_generator if there
                        # is any.
                        data_generator=pred_data_generator)
                else:
                    batch_preds = estimator.predict_proba(
                        batch_X,
                        # The presence of `pred_data_generator` below is to
                        # override model carrying data_generator if there
                        # is any.
                        data_generator=pred_data_generator)

                if batch_preds.ndim == 1:
                    batch_preds = batch_preds[:, np.newaxis]

                batch_meta = variants[index_array]
                batch_out = np.column_stack([batch_meta, batch_preds])

                if not header_done:
                    heads = np.arange(batch_preds.shape[-1]).astype(str)
                    heads_str = '\t'.join(heads)
                    file_writer.write("\t%s\n" % heads_str)
                    header_done = True

                for row in batch_out:
                    row_str = '\t'.join(row)
                    file_writer.write("%s\n" % row_str)

                steps_done += 1

        finally:
            file_writer.close()
            # TODO: make api `pred_data_generator.close()`
            pred_data_generator.close()
        return 0
    # end input

    # output
    if len(preds.shape) == 1:
        rval = pd.DataFrame(preds, columns=['Predicted'])
    else:
        rval = pd.DataFrame(preds)

    rval.to_csv(outfile_predict, sep='\t', header=True, index=False)
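For context, a sketch of the kind of thin argparse wrapper that typically drives this entry point; the flag names below are illustrative, not the tool's exact command-line interface:

if __name__ == '__main__':
    import argparse

    aparser = argparse.ArgumentParser()
    aparser.add_argument("-i", "--inputs", dest="inputs", required=True)
    aparser.add_argument("-e", "--infile_estimator", dest="infile_estimator")
    aparser.add_argument("-O", "--outfile_predict", dest="outfile_predict")
    aparser.add_argument("-w", "--infile_weights", dest="infile_weights")
    aparser.add_argument("-X", "--infile1", dest="infile1")
    aparser.add_argument("-f", "--fasta_path", dest="fasta_path")
    aparser.add_argument("-r", "--ref_seq", dest="ref_seq")
    aparser.add_argument("-v", "--vcf_path", dest="vcf_path")
    args = aparser.parse_args()

    main(args.inputs, args.infile_estimator, args.outfile_predict,
         infile_weights=args.infile_weights, infile1=args.infile1,
         fasta_path=args.fasta_path, ref_seq=args.ref_seq,
         vcf_path=args.vcf_path)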
Example No. 4
                     gaussian_process, kernel_approximation, metrics,
                     model_selection, naive_bayes, neighbors,
                     pipeline, preprocessing, svm, linear_model,
                     tree, discriminant_analysis)
from sklearn.exceptions import FitFailedWarning
from sklearn.metrics.scorer import _check_multimetric_scoring
from sklearn.model_selection._validation import _score, cross_validate
from sklearn.model_selection import _search, _validation
from sklearn.utils import indexable, safe_indexing

from galaxy_ml.model_validations import train_test_split
from galaxy_ml.utils import (SafeEval, get_scoring, load_model,
                             read_columns, try_get_attr, get_module)


_fit_and_score = try_get_attr('galaxy_ml.model_validations', '_fit_and_score')
setattr(_search, '_fit_and_score', _fit_and_score)
setattr(_validation, '_fit_and_score', _fit_and_score)

N_JOBS = int(__import__('os').environ.get('GALAXY_SLOTS', 1))
CACHE_DIR = './cached'
NON_SEARCHABLE = ('n_jobs', 'pre_dispatch', 'memory', '_path',
                  'nthread', 'callbacks')
ALLOWED_CALLBACKS = ('EarlyStopping', 'TerminateOnNaN', 'ReduceLROnPlateau',
                     'CSVLogger', 'None')


def _eval_swap_params(params_builder):
    swap_params = {}

    for p in params_builder['param_set']:
Example No. 5
def _do_train_test_split_val(searcher,
                             X,
                             y,
                             params,
                             error_score='raise',
                             primary_scoring=None,
                             groups=None,
                             outfile=None):
    """ do train test split, searchCV validates on the train and then use
    the best_estimator_ to evaluate on the test

    Returns
    --------
    Fitted SearchCV object
    """
    train_test_split = try_get_attr('galaxy_ml.model_validations',
                                    'train_test_split')
    split_options = params['outer_split']

    # splits
    if split_options['shuffle'] == 'stratified':
        split_options['labels'] = y
        X, X_test, y, y_test = train_test_split(X, y, **split_options)
    elif split_options['shuffle'] == 'group':
        if groups is None:
            raise ValueError("No group based CV option was choosen for "
                             "group shuffle!")
        split_options['labels'] = groups
        if y is None:
            X, X_test, groups, _ =\
                train_test_split(X, groups, **split_options)
        else:
            X, X_test, y, y_test, groups, _ =\
                train_test_split(X, y, groups, **split_options)
    else:
        if split_options['shuffle'] == 'None':
            split_options['shuffle'] = None
        X, X_test, y, y_test =\
            train_test_split(X, y, **split_options)

    if error_score == 'raise':
        searcher.fit(X, y, groups=groups)
    else:
        warnings.simplefilter('always', FitFailedWarning)
        with warnings.catch_warnings(record=True) as w:
            try:
                searcher.fit(X, y, groups=groups)
            except ValueError:
                pass
            for warning in w:
                print(repr(warning.message))

    scorer_ = searcher.scorer_
    if isinstance(scorer_, collections.Mapping):
        is_multimetric = True
    else:
        is_multimetric = False

    best_estimator_ = getattr(searcher, 'best_estimator_')

    # TODO Solve deep learning models in pipeline
    if best_estimator_.__class__.__name__ == 'KerasGBatchClassifier':
        test_score = best_estimator_.evaluate(X_test,
                                              scorer=scorer_,
                                              is_multimetric=is_multimetric)
    else:
        test_score = _score(best_estimator_,
                            X_test,
                            y_test,
                            scorer_,
                            is_multimetric=is_multimetric)

    if not is_multimetric:
        test_score = {primary_scoring: test_score}
    for key, value in test_score.items():
        test_score[key] = [value]
    result_df = pd.DataFrame(test_score)
    result_df.to_csv(path_or_buf=outfile, sep='\t', header=True, index=False)

    return searcher
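A minimal sketch of what this helper expects from its caller, assuming a plain scikit-learn GridSearchCV and a params['outer_split'] dict shaped like the Galaxy tool JSON; the split options shown are assumptions for illustration:

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

X = np.random.rand(100, 5)
y = np.random.randint(0, 2, size=100)

searcher = GridSearchCV(RandomForestClassifier(),
                        {'n_estimators': [10, 50]}, cv=3)
params = {'outer_split': {'shuffle': 'simple',      # anything other than
                          'test_size': 0.25,        # 'stratified'/'group'
                          'random_state': 0}}
searcher = _do_train_test_split_val(searcher, X, y, params,
                                    primary_scoring='accuracy',
                                    outfile='test_scores.tsv')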
Example No. 6
def main(
    inputs,
    infile_estimator,
    outfile_predict,
    infile_weights=None,
    infile1=None,
    fasta_path=None,
    ref_seq=None,
    vcf_path=None,
):
    """
    Parameters
    ----------
    inputs : str
        File path to galaxy tool parameter

    infile_estimator : str
        File path to trained estimator input

    outfile_predict : str
        File path to save the prediction results, tabular

    infile_weights : str
        File path to weights input

    infile1 : str
        File path to dataset containing features

    fasta_path : str
        File path to dataset containing fasta file

    ref_seq : str
        File path to dataset containing the reference genome sequence.

    vcf_path : str
        File path to dataset containing variants info.
    """
    warnings.filterwarnings("ignore")

    with open(inputs, "r") as param_handler:
        params = json.load(param_handler)

    # load model
    with open(infile_estimator, "rb") as est_handler:
        estimator = load_model(est_handler)

    main_est = estimator
    if isinstance(estimator, Pipeline):
        main_est = estimator.steps[-1][-1]
    if hasattr(main_est, "config") and hasattr(main_est, "load_weights"):
        if not infile_weights or infile_weights == "None":
            raise ValueError("The selected model skeleton asks for weights, "
                             "but dataset for weights wan not selected!")
        main_est.load_weights(infile_weights)

    # handle data input
    input_type = params["input_options"]["selected_input"]
    # tabular input
    if input_type == "tabular":
        header = "infer" if params["input_options"]["header1"] else None
        column_option = params["input_options"]["column_selector_options_1"][
            "selected_column_selector_option"]
        if column_option in [
                "by_index_number",
                "all_but_by_index_number",
                "by_header_name",
                "all_but_by_header_name",
        ]:
            c = params["input_options"]["column_selector_options_1"]["col1"]
        else:
            c = None

        df = pd.read_csv(infile1, sep="\t", header=header, parse_dates=True)

        X = read_columns(df, c=c, c_option=column_option).astype(float)

        if params["method"] == "predict":
            preds = estimator.predict(X)
        else:
            preds = estimator.predict_proba(X)

    # sparse input
    elif input_type == "sparse":
        X = mmread(open(infile1, "r"))
        if params["method"] == "predict":
            preds = estimator.predict(X)
        else:
            preds = estimator.predict_proba(X)

    # fasta input
    elif input_type == "seq_fasta":
        if not hasattr(estimator, "data_batch_generator"):
            raise ValueError("To do prediction on sequences in fasta input, "
                             "the estimator must be a `KerasGBatchClassifier`"
                             "equipped with data_batch_generator!")
        pyfaidx = get_module("pyfaidx")
        sequences = pyfaidx.Fasta(fasta_path)
        n_seqs = len(sequences.keys())
        X = np.arange(n_seqs)[:, np.newaxis]
        seq_length = estimator.data_batch_generator.seq_length
        batch_size = getattr(estimator, "batch_size", 32)
        steps = (n_seqs + batch_size - 1) // batch_size

        seq_type = params["input_options"]["seq_type"]
        klass = try_get_attr("galaxy_ml.preprocessors", seq_type)

        pred_data_generator = klass(fasta_path, seq_length=seq_length)

        if params["method"] == "predict":
            preds = estimator.predict(X,
                                      data_generator=pred_data_generator,
                                      steps=steps)
        else:
            preds = estimator.predict_proba(X,
                                            data_generator=pred_data_generator,
                                            steps=steps)

    # vcf input
    elif input_type == "variant_effect":
        klass = try_get_attr("galaxy_ml.preprocessors",
                             "GenomicVariantBatchGenerator")

        options = params["input_options"]
        options.pop("selected_input")
        if options["blacklist_regions"] == "none":
            options["blacklist_regions"] = None

        pred_data_generator = klass(ref_genome_path=ref_seq,
                                    vcf_path=vcf_path,
                                    **options)

        pred_data_generator.set_processing_attrs()

        variants = pred_data_generator.variants

        # predict 1600 samples at once, then write to file
        gen_flow = pred_data_generator.flow(batch_size=1600)

        file_writer = open(outfile_predict, "w")
        header_row = "\t".join(
            ["chrom", "pos", "name", "ref", "alt", "strand"])
        file_writer.write(header_row)
        header_done = False

        steps_done = 0

        # TODO: multiple threading
        try:
            while steps_done < len(gen_flow):
                index_array = next(gen_flow.index_generator)
                batch_X = gen_flow._get_batches_of_transformed_samples(
                    index_array)

                if params["method"] == "predict":
                    batch_preds = estimator.predict(
                        batch_X,
                        # The presence of `pred_data_generator` below is to
                        # override model carrying data_generator if there
                        # is any.
                        data_generator=pred_data_generator,
                    )
                else:
                    batch_preds = estimator.predict_proba(
                        batch_X,
                        # The presence of `pred_data_generator` below is to
                        # override model carrying data_generator if there
                        # is any.
                        data_generator=pred_data_generator,
                    )

                if batch_preds.ndim == 1:
                    batch_preds = batch_preds[:, np.newaxis]

                batch_meta = variants[index_array]
                batch_out = np.column_stack([batch_meta, batch_preds])

                if not header_done:
                    heads = np.arange(batch_preds.shape[-1]).astype(str)
                    heads_str = "\t".join(heads)
                    file_writer.write("\t%s\n" % heads_str)
                    header_done = True

                for row in batch_out:
                    row_str = "\t".join(row)
                    file_writer.write("%s\n" % row_str)

                steps_done += 1

        finally:
            file_writer.close()
            # TODO: make api `pred_data_generator.close()`
            pred_data_generator.close()
        return 0
    # end input

    # output
    if len(preds.shape) == 1:
        rval = pd.DataFrame(preds, columns=["Predicted"])
    else:
        rval = pd.DataFrame(preds)

    rval.to_csv(outfile_predict, sep="\t", header=True, index=False)
Example No. 7
import os

import numpy as np
import pandas as pd
from galaxy_ml.externals.selene_sdk.utils import compute_score
from galaxy_ml.keras_galaxy_models import _predict_generator
from galaxy_ml.model_validations import train_test_split
from galaxy_ml.utils import (clean_params, get_main_estimator,
                             get_module, get_scoring, load_model, read_columns,
                             SafeEval, try_get_attr)
from scipy.io import mmread
from sklearn.metrics.scorer import _check_multimetric_scoring
from sklearn.model_selection import _search, _validation
from sklearn.model_selection._validation import _score
from sklearn.pipeline import Pipeline
from sklearn.utils import indexable, safe_indexing

_fit_and_score = try_get_attr("galaxy_ml.model_validations", "_fit_and_score")
setattr(_search, "_fit_and_score", _fit_and_score)
setattr(_validation, "_fit_and_score", _fit_and_score)

N_JOBS = int(os.environ.get("GALAXY_SLOTS", 1))
CACHE_DIR = os.path.join(os.getcwd(), "cached")
del os
NON_SEARCHABLE = ("n_jobs", "pre_dispatch", "memory", "_path", "nthread", "callbacks")
ALLOWED_CALLBACKS = (
    "EarlyStopping",
    "TerminateOnNaN",
    "ReduceLROnPlateau",
    "CSVLogger",
    "None",
)
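N_JOBS falls back to a single worker when the Galaxy job runner does not export GALAXY_SLOTS. For local testing it can be overridden, but only before this module is imported, since the value is read at import time:

import os
os.environ.setdefault("GALAXY_SLOTS", "4")  # hypothetical local override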
Example No. 8
def main(inputs, infile_estimator, infile1, infile2,
         outfile_result, outfile_object=None,
         outfile_weights=None, groups=None,
         ref_seq=None, intervals=None, targets=None,
         fasta_path=None):
    """
    Parameters
    ----------
    inputs : str
        File path to galaxy tool parameter

    infile_estimator : str
        File path to estimator

    infile1 : str
        File path to dataset containing features

    infile2 : str
        File path to dataset containing target values

    outfile_result : str
        File path to save the results, either cv_results or test result

    outfile_object : str, optional
        File path to save searchCV object

    outfile_weights : str, optional
        File path to save model weights

    groups : str
        File path to dataset containing groups labels

    ref_seq : str
        File path to dataset containing genome sequence file

    intervals : str
        File path to dataset containing interval file

    targets : str
        File path to dataset compressed target bed file

    fasta_path : str
        File path to dataset containing fasta file
    """
    warnings.simplefilter('ignore')

    with open(inputs, 'r') as param_handler:
        params = json.load(param_handler)

    params_builder = params['search_schemes']['search_params_builder']

    with open(infile_estimator, 'rb') as estimator_handler:
        estimator = load_model(estimator_handler)
    estimator_params = estimator.get_params()

    # store read dataframe object
    loaded_df = {}

    input_type = params['input_options']['selected_input']
    # tabular input
    if input_type == 'tabular':
        header = 'infer' if params['input_options']['header1'] else None
        column_option = (params['input_options']['column_selector_options_1']
                         ['selected_column_selector_option'])
        if column_option in ['by_index_number', 'all_but_by_index_number',
                             'by_header_name', 'all_but_by_header_name']:
            c = params['input_options']['column_selector_options_1']['col1']
        else:
            c = None

        df_key = infile1 + repr(header)
        df = pd.read_csv(infile1, sep='\t', header=header,
                         parse_dates=True)
        loaded_df[df_key] = df

        X = read_columns(df, c=c, c_option=column_option).astype(float)
    # sparse input
    elif input_type == 'sparse':
        X = mmread(open(infile1, 'r'))

    # fasta_file input
    elif input_type == 'seq_fasta':
        pyfaidx = get_module('pyfaidx')
        sequences = pyfaidx.Fasta(fasta_path)
        n_seqs = len(sequences.keys())
        X = np.arange(n_seqs)[:, np.newaxis]
        for param in estimator_params.keys():
            if param.endswith('fasta_path'):
                estimator.set_params(
                    **{param: fasta_path})
                break
        else:
            raise ValueError(
                "The selected estimator doesn't support "
                "fasta file input! Please consider using "
                "KerasGBatchClassifier with "
                "FastaDNABatchGenerator/FastaProteinBatchGenerator "
                "or having GenomeOneHotEncoder/ProteinOneHotEncoder "
                "in pipeline!")

    elif input_type == 'refseq_and_interval':
        path_params = {
            'data_batch_generator__ref_genome_path': ref_seq,
            'data_batch_generator__intervals_path': intervals,
            'data_batch_generator__target_path': targets
        }
        estimator.set_params(**path_params)
        n_intervals = sum(1 for line in open(intervals))
        X = np.arange(n_intervals)[:, np.newaxis]

    # Get target y
    header = 'infer' if params['input_options']['header2'] else None
    column_option = (params['input_options']['column_selector_options_2']
                     ['selected_column_selector_option2'])
    if column_option in ['by_index_number', 'all_but_by_index_number',
                         'by_header_name', 'all_but_by_header_name']:
        c = params['input_options']['column_selector_options_2']['col2']
    else:
        c = None

    df_key = infile2 + repr(header)
    if df_key in loaded_df:
        infile2 = loaded_df[df_key]
    else:
        infile2 = pd.read_csv(infile2, sep='\t',
                              header=header, parse_dates=True)
        loaded_df[df_key] = infile2

    y = read_columns(
            infile2,
            c=c,
            c_option=column_option,
            sep='\t',
            header=header,
            parse_dates=True)
    if len(y.shape) == 2 and y.shape[1] == 1:
        y = y.ravel()
    if input_type == 'refseq_and_interval':
        estimator.set_params(
            data_batch_generator__features=y.ravel().tolist())
        y = None
    # end y

    optimizer = params['search_schemes']['selected_search_scheme']
    optimizer = getattr(model_selection, optimizer)

    # handle gridsearchcv options
    options = params['search_schemes']['options']

    if groups:
        header = 'infer' if (options['cv_selector']['groups_selector']
                                    ['header_g']) else None
        column_option = (options['cv_selector']['groups_selector']
                                ['column_selector_options_g']
                                ['selected_column_selector_option_g'])
        if column_option in ['by_index_number', 'all_but_by_index_number',
                             'by_header_name', 'all_but_by_header_name']:
            c = (options['cv_selector']['groups_selector']
                        ['column_selector_options_g']['col_g'])
        else:
            c = None

        df_key = groups + repr(header)
        if df_key in loaded_df:
            groups = loaded_df[df_key]

        groups = read_columns(
                groups,
                c=c,
                c_option=column_option,
                sep='\t',
                header=header,
                parse_dates=True)
        groups = groups.ravel()
        options['cv_selector']['groups_selector'] = groups

    splitter, groups = get_cv(options.pop('cv_selector'))
    options['cv'] = splitter
    options['n_jobs'] = N_JOBS
    primary_scoring = options['scoring']['primary_scoring']
    options['scoring'] = get_scoring(options['scoring'])
    if options['error_score']:
        options['error_score'] = 'raise'
    else:
        options['error_score'] = np.NaN
    if options['refit'] and isinstance(options['scoring'], dict):
        options['refit'] = primary_scoring
    if 'pre_dispatch' in options and options['pre_dispatch'] == '':
        options['pre_dispatch'] = None

    # del loaded_df
    del loaded_df

    # handle memory
    memory = joblib.Memory(location=CACHE_DIR, verbose=0)
    # cache iraps_core fits could increase search speed significantly
    if estimator.__class__.__name__ == 'IRAPSClassifier':
        estimator.set_params(memory=memory)
    else:
        # For iraps buried in pipeline
        for p, v in estimator_params.items():
            if p.endswith('memory'):
                # for case of `__irapsclassifier__memory`
                if len(p) > 8 and p[:-8].endswith('irapsclassifier'):
                    # cache iraps_core fits could increase search
                    # speed significantly
                    new_params = {p: memory}
                    estimator.set_params(**new_params)
                # security reason, we don't want memory being
                # modified unexpectedly
                elif v:
                    new_params = {p: None}
                    estimator.set_params(**new_params)
            # For now, 1 CPU is suggested for irapsclassifier
            elif p.endswith('n_jobs'):
                new_params = {p: 1}
                estimator.set_params(**new_params)
            # for security reason, types of callbacks are limited
            elif p.endswith('callbacks'):
                for cb in v:
                    cb_type = cb['callback_selection']['callback_type']
                    if cb_type not in ALLOWED_CALLBACKS:
                        raise ValueError(
                            "Prohibited callback type: %s!" % cb_type)

    param_grid = _eval_search_params(params_builder)
    searcher = optimizer(estimator, param_grid, **options)

    # do nested split
    split_mode = params['outer_split'].pop('split_mode')
    # nested CV, outer cv using cross_validate
    if split_mode == 'nested_cv':
        outer_cv, _ = get_cv(params['outer_split']['cv_selector'])

        if options['error_score'] == 'raise':
            rval = cross_validate(
                searcher, X, y, scoring=options['scoring'],
                cv=outer_cv, n_jobs=N_JOBS, verbose=0,
                error_score=options['error_score'])
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    rval = cross_validate(
                        searcher, X, y,
                        scoring=options['scoring'],
                        cv=outer_cv, n_jobs=N_JOBS,
                        verbose=0,
                        error_score=options['error_score'])
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        keys = list(rval.keys())
        for k in keys:
            if k.startswith('test'):
                rval['mean_' + k] = np.mean(rval[k])
                rval['std_' + k] = np.std(rval[k])
            if k.endswith('time'):
                rval.pop(k)
        rval = pd.DataFrame(rval)
        rval = rval[sorted(rval.columns)]
        rval.to_csv(path_or_buf=outfile_result, sep='\t',
                    header=True, index=False)
    else:
        if split_mode == 'train_test_split':
            train_test_split = try_get_attr(
                'galaxy_ml.model_validations', 'train_test_split')
            # make sure refit is chosen
            # this could be True for sklearn models, but not the case for
            # deep learning models
            if not options['refit'] and \
                    not all(hasattr(estimator, attr)
                            for attr in ('config', 'model_type')):
                warnings.warn("Refit is change to `True` for nested "
                              "validation!")
                setattr(searcher, 'refit', True)
            split_options = params['outer_split']

            # splits
            if split_options['shuffle'] == 'stratified':
                split_options['labels'] = y
                X, X_test, y, y_test = train_test_split(X, y, **split_options)
            elif split_options['shuffle'] == 'group':
                if groups is None:
                    raise ValueError("No group based CV option was "
                                     "choosen for group shuffle!")
                split_options['labels'] = groups
                if y is None:
                    X, X_test, groups, _ =\
                        train_test_split(X, groups, **split_options)
                else:
                    X, X_test, y, y_test, groups, _ =\
                        train_test_split(X, y, groups, **split_options)
            else:
                if split_options['shuffle'] == 'None':
                    split_options['shuffle'] = None
                X, X_test, y, y_test =\
                    train_test_split(X, y, **split_options)
        # end train_test_split

        # shared by both train_test_split and non-split
        if options['error_score'] == 'raise':
            searcher.fit(X, y, groups=groups)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    searcher.fit(X, y, groups=groups)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        # no outer split
        if split_mode == 'no':
            # save results
            cv_results = pd.DataFrame(searcher.cv_results_)
            cv_results = cv_results[sorted(cv_results.columns)]
            cv_results.to_csv(path_or_buf=outfile_result, sep='\t',
                              header=True, index=False)

        # train_test_split, output test result using best_estimator_
        # or rebuild the trained estimator using weights if applicable.
        else:
            scorer_ = searcher.scorer_
            if isinstance(scorer_, collections.Mapping):
                is_multimetric = True
            else:
                is_multimetric = False

            best_estimator_ = getattr(searcher, 'best_estimator_', None)
            if not best_estimator_:
                raise ValueError("GridSearchCV object has no "
                                 "`best_estimator_` when `refit`=False!")

            if best_estimator_.__class__.__name__ == 'KerasGBatchClassifier' \
                    and hasattr(estimator.data_batch_generator, 'target_path'):
                test_score = best_estimator_.evaluate(
                    X_test, scorer=scorer_, is_multimetric=is_multimetric)
            else:
                test_score = _score(best_estimator_, X_test,
                                    y_test, scorer_,
                                    is_multimetric=is_multimetric)

            if not is_multimetric:
                test_score = {primary_scoring: test_score}
            for key, value in test_score.items():
                test_score[key] = [value]
            result_df = pd.DataFrame(test_score)
            result_df.to_csv(path_or_buf=outfile_result, sep='\t',
                             header=True, index=False)

    memory.clear(warn=False)

    if outfile_object:
        best_estimator_ = getattr(searcher, 'best_estimator_', None)
        if not best_estimator_:
            warnings.warn("GridSearchCV object has no attribute "
                          "'best_estimator_', because either it's "
                          "nested gridsearch or `refit` is False!")
            return

        main_est = best_estimator_
        if isinstance(best_estimator_, pipeline.Pipeline):
            main_est = best_estimator_.steps[-1][-1]

        if hasattr(main_est, 'model_') \
                and hasattr(main_est, 'save_weights'):
            if outfile_weights:
                main_est.save_weights(outfile_weights)
            del main_est.model_
            del main_est.fit_params
            del main_est.model_class_
            del main_est.validation_data
            if getattr(main_est, 'data_generator_', None):
                del main_est.data_generator_
                del main_est.data_batch_generator

        with open(outfile_object, 'wb') as output_handler:
            pickle.dump(best_estimator_, output_handler,
                        pickle.HIGHEST_PROTOCOL)
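The memory, n_jobs and callbacks handling above relies on scikit-learn's double-underscore addressing of nested parameters; a small, self-contained illustration of the convention those p.endswith(...) checks match against:

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

pipe = Pipeline([('scale', StandardScaler()),
                 ('clf', LogisticRegression())])
# get_params() keys end with the leaf parameter name, e.g. 'clf__n_jobs'
print([p for p in pipe.get_params() if p.endswith('n_jobs')])
# restrict the nested estimator to a single CPU, as the loop above does
pipe.set_params(clf__n_jobs=1)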
Example No. 9
def build_keras_model(inputs,
                      outfile,
                      model_json,
                      infile_weights=None,
                      batch_mode=False,
                      outfile_params=None):
    """ for `keras_model_builder` tool

    Parameters
    ----------
    inputs : dict
        loaded galaxy tool parameters from `keras_model_builder` tool.
    outfile : str
        Path to galaxy dataset containing the keras_galaxy model output.
    model_json : str
        Path to dataset containing keras model JSON.
    infile_weights : str or None
        If string, path to dataset containing model weights.
    batch_mode : bool, default=False
        Whether to build online batch classifier.
    outfile_params : str, default=None
        File path to search parameters output.
    """
    with open(model_json, 'r') as f:
        json_model = json.load(f)

    config = json_model['config']

    options = {}

    if json_model['class_name'] == 'Sequential':
        options['model_type'] = 'sequential'
        klass = Sequential
    elif json_model['class_name'] == 'Model':
        options['model_type'] = 'functional'
        klass = Model
    else:
        raise ValueError("Unknow Keras model class: %s" %
                         json_model['class_name'])

    # load prefitted model
    if inputs['mode_selection']['mode_type'] == 'prefitted':
        estimator = klass.from_config(config)
        estimator.load_weights(infile_weights)
    # build train model
    else:
        cls_name = inputs['mode_selection']['learning_type']
        klass = try_get_attr('galaxy_ml.keras_galaxy_models', cls_name)

        options['loss'] = (inputs['mode_selection']['compile_params']['loss'])
        options['optimizer'] =\
            (inputs['mode_selection']['compile_params']
             ['optimizer_selection']['optimizer_type']).lower()

        options.update((inputs['mode_selection']['compile_params']
                        ['optimizer_selection']['optimizer_options']))

        train_metrics = (
            inputs['mode_selection']['compile_params']['metrics']).split(',')
        if train_metrics[-1] == 'none':
            train_metrics = train_metrics[:-1]
        options['metrics'] = train_metrics

        options.update(inputs['mode_selection']['fit_params'])
        options['seed'] = inputs['mode_selection']['random_seed']

        if batch_mode:
            generator = get_batch_generator(
                inputs['mode_selection']['generator_selection'])
            options['data_batch_generator'] = generator
            options['prediction_steps'] = \
                inputs['mode_selection']['prediction_steps']
            options['class_positive_factor'] = \
                inputs['mode_selection']['class_positive_factor']
        estimator = klass(config, **options)
        if outfile_params:
            hyper_params = get_search_params(estimator)
            # TODO: remove this after making `verbose` tunable
            for h_param in hyper_params:
                if h_param[1].endswith('verbose'):
                    h_param[0] = '@'
            df = pd.DataFrame(hyper_params, columns=['', 'Parameter', 'Value'])
            df.to_csv(outfile_params, sep='\t', index=False)

    print(repr(estimator))
    # save model by pickle
    with open(outfile, 'wb') as f:
        pickle.dump(estimator, f, pickle.HIGHEST_PROTOCOL)
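A minimal sketch of calling this builder in prefitted mode, which only reads mode_selection['mode_type'] from the tool parameters; the file names here are illustrative:

inputs = {'mode_selection': {'mode_type': 'prefitted'}}
build_keras_model(inputs,
                  outfile='keras_model.pkl',
                  model_json='model.json',
                  infile_weights='weights.h5')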
Example No. 10
def main(inputs,
         infile_estimator,
         infile1,
         infile2,
         outfile_result,
         outfile_object=None,
         outfile_weights=None,
         groups=None,
         ref_seq=None,
         intervals=None,
         targets=None,
         fasta_path=None):
    """
    Parameters
    ----------
    inputs : str
        File path to galaxy tool parameter

    infile_estimator : str
        File path to estimator

    infile1 : str
        File path to dataset containing features

    infile2 : str
        File path to dataset containing target values

    outfile_result : str
        File path to save the results, either cv_results or test result

    outfile_object : str, optional
        File path to save searchCV object

    outfile_weights : str, optional
        File path to save model weights

    groups : str
        File path to dataset containing groups labels

    ref_seq : str
        File path to dataset containing genome sequence file

    intervals : str
        File path to dataset containing interval file

    targets : str
        File path to dataset compressed target bed file

    fasta_path : str
        File path to dataset containing fasta file
    """
    warnings.simplefilter('ignore')

    # store read dataframe object
    loaded_df = {}

    with open(inputs, 'r') as param_handler:
        params = json.load(param_handler)

    # Override the refit parameter
    params['search_schemes']['options']['refit'] = True \
        if (params['save'] != 'nope' or
            params['outer_split']['split_mode'] == 'nested_cv') else False

    with open(infile_estimator, 'rb') as estimator_handler:
        estimator = load_model(estimator_handler)

    if estimator.__class__.__name__ == 'KerasGBatchClassifier':
        _fit_and_score = try_get_attr('galaxy_ml.model_validations',
                                      '_fit_and_score')

        setattr(_search, '_fit_and_score', _fit_and_score)
        setattr(_validation, '_fit_and_score', _fit_and_score)

    optimizer = params['search_schemes']['selected_search_scheme']
    if optimizer == 'skopt.BayesSearchCV':
        optimizer = BayesSearchCV
    else:
        optimizer = getattr(model_selection, optimizer)

    # handle gridsearchcv options
    options = params['search_schemes']['options']

    if groups:
        header = 'infer' if (
            options['cv_selector']['groups_selector']['header_g']) else None
        column_option = (
            options['cv_selector']['groups_selector']
            ['column_selector_options_g']['selected_column_selector_option_g'])
        if column_option in [
                'by_index_number', 'all_but_by_index_number', 'by_header_name',
                'all_but_by_header_name'
        ]:
            c = (options['cv_selector']['groups_selector']
                 ['column_selector_options_g']['col_g'])
        else:
            c = None

        df_key = groups + repr(header)

        groups = pd.read_csv(groups, sep='\t', header=header, parse_dates=True)
        loaded_df[df_key] = groups

        groups = read_columns(groups,
                              c=c,
                              c_option=column_option,
                              sep='\t',
                              header=header,
                              parse_dates=True)
        groups = groups.ravel()
        options['cv_selector']['groups_selector'] = groups

    cv_selector = options.pop('cv_selector')
    if Version(galaxy_ml_version) < Version('0.8.3'):
        cv_selector.pop('n_stratification_bins', None)
    splitter, groups = get_cv(cv_selector)
    options['cv'] = splitter
    primary_scoring = options['scoring']['primary_scoring']
    options['scoring'] = get_scoring(options['scoring'])
    # TODO make BayesSearchCV support multiple scoring
    if optimizer == 'skopt.BayesSearchCV' and \
            isinstance(options['scoring'], dict):
        options['scoring'] = options['scoring'][primary_scoring]
        warnings.warn("BayesSearchCV doesn't support multiple "
                      "scorings! Primary scoring is used.")
    if options['error_score']:
        options['error_score'] = 'raise'
    else:
        options['error_score'] = np.NaN
    if options['refit'] and isinstance(options['scoring'], dict):
        options['refit'] = primary_scoring
    if 'pre_dispatch' in options and options['pre_dispatch'] == '':
        options['pre_dispatch'] = None

    params_builder = params['search_schemes']['search_params_builder']
    param_grid = _eval_search_params(params_builder)

    estimator = clean_params(estimator)

    # save the SearchCV object without fit
    if params['save'] == 'save_no_fit':
        searcher = optimizer(estimator, param_grid, **options)
        print(searcher)
        with open(outfile_object, 'wb') as output_handler:
            pickle.dump(searcher, output_handler, pickle.HIGHEST_PROTOCOL)
        return 0

    # read inputs and loads new attributes, like paths
    estimator, X, y = _handle_X_y(estimator,
                                  params,
                                  infile1,
                                  infile2,
                                  loaded_df=loaded_df,
                                  ref_seq=ref_seq,
                                  intervals=intervals,
                                  targets=targets,
                                  fasta_path=fasta_path)

    # cache iraps_core fits could increase search speed significantly
    memory = joblib.Memory(location=CACHE_DIR, verbose=0)
    estimator = _set_memory(estimator, memory)

    searcher = optimizer(estimator, param_grid, **options)

    split_mode = params['outer_split'].pop('split_mode')

    # Nested CV
    if split_mode == 'nested_cv':
        cv_selector = params['outer_split']['cv_selector']
        if Version(galaxy_ml_version) < Version('0.8.3'):
            cv_selector.pop('n_stratification_bins', None)
        outer_cv, _ = get_cv(cv_selector)
        # nested CV, outer cv using cross_validate
        if options['error_score'] == 'raise':
            rval = cross_validate(
                searcher,
                X,
                y,
                groups=groups,
                scoring=options['scoring'],
                cv=outer_cv,
                n_jobs=N_JOBS,
                verbose=options['verbose'],
                fit_params={'groups': groups},
                return_estimator=(params['save'] == 'save_estimator'),
                error_score=options['error_score'],
                return_train_score=True)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    rval = cross_validate(
                        searcher,
                        X,
                        y,
                        groups=groups,
                        scoring=options['scoring'],
                        cv=outer_cv,
                        n_jobs=N_JOBS,
                        verbose=options['verbose'],
                        fit_params={'groups': groups},
                        return_estimator=(params['save'] == 'save_estimator'),
                        error_score=options['error_score'],
                        return_train_score=True)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        fitted_searchers = rval.pop('estimator', [])
        if fitted_searchers:
            import os
            pwd = os.getcwd()
            save_dir = os.path.join(pwd, 'cv_results_in_folds')
            try:
                os.mkdir(save_dir)
                for idx, obj in enumerate(fitted_searchers):
                    target_name = 'cv_results_' + '_' + 'split%d' % idx
                    target_path = os.path.join(pwd, save_dir, target_name)
                    cv_results_ = getattr(obj, 'cv_results_', None)
                    if not cv_results_:
                        print("%s is not available" % target_name)
                        continue
                    cv_results_ = pd.DataFrame(cv_results_)
                    cv_results_ = cv_results_[sorted(cv_results_.columns)]
                    cv_results_.to_csv(target_path,
                                       sep='\t',
                                       header=True,
                                       index=False)
            except Exception as e:
                print(e)
            finally:
                del os

        keys = list(rval.keys())
        for k in keys:
            if k.startswith('test'):
                rval['mean_' + k] = np.mean(rval[k])
                rval['std_' + k] = np.std(rval[k])
            if k.endswith('time'):
                rval.pop(k)
        rval = pd.DataFrame(rval)
        rval = rval[sorted(rval.columns)]
        rval.to_csv(path_or_buf=outfile_result,
                    sep='\t',
                    header=True,
                    index=False)

        return 0

        # deprecate train test split mode
        """searcher = _do_train_test_split_val(
            searcher, X, y, params,
            primary_scoring=primary_scoring,
            error_score=options['error_score'],
            groups=groups,
            outfile=outfile_result)"""

    # no outer split
    else:
        searcher.set_params(n_jobs=N_JOBS)
        if options['error_score'] == 'raise':
            searcher.fit(X, y, groups=groups)
        else:
            warnings.simplefilter('always', FitFailedWarning)
            with warnings.catch_warnings(record=True) as w:
                try:
                    searcher.fit(X, y, groups=groups)
                except ValueError:
                    pass
                for warning in w:
                    print(repr(warning.message))

        cv_results = pd.DataFrame(searcher.cv_results_)
        cv_results = cv_results[sorted(cv_results.columns)]
        cv_results.to_csv(path_or_buf=outfile_result,
                          sep='\t',
                          header=True,
                          index=False)

    memory.clear(warn=False)

    # output best estimator, and weights if applicable
    if outfile_object:
        best_estimator_ = getattr(searcher, 'best_estimator_', None)
        if not best_estimator_:
            warnings.warn("GridSearchCV object has no attribute "
                          "'best_estimator_', because either it's "
                          "nested gridsearch or `refit` is False!")
            return

        # clean params
        best_estimator_ = clean_params(best_estimator_)

        main_est = get_main_estimator(best_estimator_)

        if hasattr(main_est, 'model_') \
                and hasattr(main_est, 'save_weights'):
            if outfile_weights:
                main_est.save_weights(outfile_weights)
            del main_est.model_
            del main_est.fit_params
            del main_est.model_class_
            main_est.callbacks = []
            if getattr(main_est, 'data_generator_', None):
                del main_est.data_generator_

        with open(outfile_object, 'wb') as output_handler:
            print("Best estimator is saved: %s " % repr(best_estimator_))
            pickle.dump(best_estimator_, output_handler,
                        pickle.HIGHEST_PROTOCOL)
Example No. 11
def build_keras_model(
    inputs,
    outfile,
    model_json,
    infile_weights=None,
    batch_mode=False,
    outfile_params=None,
):
    """
    for `keras_model_builder` tool

    Parameters
    ----------
    inputs : dict
        loaded galaxy tool parameters from `keras_model_builder` tool.
    outfile : str
        Path to galaxy dataset containing the keras_galaxy model output.
    model_json : str
        Path to dataset containing keras model JSON.
    infile_weights : str or None
        If string, path to dataset containing model weights.
    batch_mode : bool, default=False
        Whether to build online batch classifier.
    outfile_params : str, default=None
        File path to search parameters output.
    """
    with open(model_json, "r") as f:
        json_model = json.load(f)

    config = json_model["config"]

    options = {}

    if json_model["class_name"] == "Sequential":
        options["model_type"] = "sequential"
        klass = Sequential
    elif json_model["class_name"] == "Model":
        options["model_type"] = "functional"
        klass = Model
    else:
        raise ValueError("Unknow Keras model class: %s" %
                         json_model["class_name"])

    # load prefitted model
    if inputs["mode_selection"]["mode_type"] == "prefitted":
        estimator = klass.from_config(config)
        estimator.load_weights(infile_weights)
    # build train model
    else:
        cls_name = inputs["mode_selection"]["learning_type"]
        klass = try_get_attr("galaxy_ml.keras_galaxy_models", cls_name)

        options["loss"] = inputs["mode_selection"]["compile_params"]["loss"]
        options["optimizer"] = (
            inputs["mode_selection"]["compile_params"]["optimizer_selection"]
            ["optimizer_type"]).lower()

        options.update((inputs["mode_selection"]["compile_params"]
                        ["optimizer_selection"]["optimizer_options"]))

        train_metrics = inputs["mode_selection"]["compile_params"]["metrics"]
        if train_metrics[-1] == "none":
            train_metrics = train_metrics[:-1]
        options["metrics"] = train_metrics

        options.update(inputs["mode_selection"]["fit_params"])
        options["seed"] = inputs["mode_selection"]["random_seed"]

        if batch_mode:
            generator = get_batch_generator(
                inputs["mode_selection"]["generator_selection"])
            options["data_batch_generator"] = generator
            options["prediction_steps"] = inputs["mode_selection"][
                "prediction_steps"]
            options["class_positive_factor"] = inputs["mode_selection"][
                "class_positive_factor"]
        estimator = klass(config, **options)
        if outfile_params:
            hyper_params = get_search_params(estimator)
            # TODO: remove this after making `verbose` tunable
            for h_param in hyper_params:
                if h_param[1].endswith("verbose"):
                    h_param[0] = "@"
            df = pd.DataFrame(hyper_params, columns=["", "Parameter", "Value"])
            df.to_csv(outfile_params, sep="\t", index=False)

    print(repr(estimator))
    # save model by pickle
    with open(outfile, "wb") as f:
        pickle.dump(estimator, f, pickle.HIGHEST_PROTOCOL)
Example No. 12
def main(inputs,
         infile_estimator,
         outfile_predict,
         infile_weights=None,
         infile1=None,
         fasta_path=None,
         ref_seq=None,
         vcf_path=None):
    """
    Parameters
    ----------
    inputs : str
        File path to galaxy tool parameter

    infile_estimator : str
        File path to trained estimator input

    outfile_predict : str
        File path to save the prediction results, tabular

    infile_weights : str
        File path to weights input

    infile1 : str
        File path to dataset containing features

    fasta_path : str
        File path to dataset containing fasta file

    ref_seq : str
        File path to dataset containing the reference genome sequence.

    vcf_path : str
        File path to dataset containing variants info.
    """
    warnings.filterwarnings('ignore')

    with open(inputs, 'r') as param_handler:
        params = json.load(param_handler)

    # load model
    with open(infile_estimator, 'rb') as est_handler:
        estimator = load_model(est_handler)

    main_est = estimator
    if isinstance(estimator, Pipeline):
        main_est = estimator.steps[-1][-1]
    if hasattr(main_est, 'config') and hasattr(main_est, 'load_weights'):
        if not infile_weights or infile_weights == 'None':
            raise ValueError("The selected model skeleton asks for weights, "
                             "but dataset for weights wan not selected!")
        main_est.load_weights(infile_weights)

    # handle data input
    input_type = params['input_options']['selected_input']
    # tabular input
    if input_type == 'tabular':
        header = 'infer' if params['input_options']['header1'] else None
        column_option = (params['input_options']['column_selector_options_1']
                         ['selected_column_selector_option'])
        if column_option in [
                'by_index_number', 'all_but_by_index_number', 'by_header_name',
                'all_but_by_header_name'
        ]:
            c = params['input_options']['column_selector_options_1']['col1']
        else:
            c = None

        df = pd.read_csv(infile1, sep='\t', header=header, parse_dates=True)

        X = read_columns(df, c=c, c_option=column_option).astype(float)

        if params['method'] == 'predict':
            preds = estimator.predict(X)
        else:
            preds = estimator.predict_proba(X)

    # sparse input
    elif input_type == 'sparse':
        X = mmread(open(infile1, 'r'))
        if params['method'] == 'predict':
            preds = estimator.predict(X)
        else:
            preds = estimator.predict_proba(X)

    # fasta input
    elif input_type == 'seq_fasta':
        if not hasattr(estimator, 'data_batch_generator'):
            raise ValueError("To do prediction on sequences in fasta input, "
                             "the estimator must be a `KerasGBatchClassifier`"
                             "equipped with data_batch_generator!")
        pyfaidx = get_module('pyfaidx')
        sequences = pyfaidx.Fasta(fasta_path)
        n_seqs = len(sequences.keys())
        X = np.arange(n_seqs)[:, np.newaxis]
        seq_length = estimator.data_batch_generator.seq_length
        batch_size = getattr(estimator, 'batch_size', 32)
        steps = (n_seqs + batch_size - 1) // batch_size

        seq_type = params['input_options']['seq_type']
        klass = try_get_attr('galaxy_ml.preprocessors', seq_type)

        pred_data_generator = klass(fasta_path, seq_length=seq_length)

        if params['method'] == 'predict':
            preds = estimator.predict(X,
                                      data_generator=pred_data_generator,
                                      steps=steps)
        else:
            preds = estimator.predict_proba(X,
                                            data_generator=pred_data_generator,
                                            steps=steps)

    # vcf input
    elif input_type == 'variant_effect':
        klass = try_get_attr('galaxy_ml.preprocessors',
                             'GenomicVariantBatchGenerator')

        options = params['input_options']
        options.pop('selected_input')
        if options['blacklist_regions'] == 'none':
            options['blacklist_regions'] = None

        pred_data_generator = klass(ref_genome_path=ref_seq,
                                    vcf_path=vcf_path,
                                    **options)

        pred_data_generator.fit()

        preds = estimator.model_.predict_generator(
            pred_data_generator.flow(batch_size=32),
            workers=N_JOBS,
            use_multiprocessing=True)

        if preds.min() < 0. or preds.max() > 1.:
            warnings.warn('Network returning invalid probability values. '
                          'The last layer might not normalize predictions '
                          'into probabilities '
                          '(like softmax or sigmoid would).')

        if params['method'] == 'predict_proba' and preds.shape[1] == 1:
            # first column is probability of class 0 and second is of class 1
            preds = np.hstack([1 - preds, preds])

        elif params['method'] == 'predict':
            if preds.shape[-1] > 1:
                # if the last activation is `softmax`, the sum of all
                # probabilities will be 1; the classification is treated as a
                # multi-class problem, otherwise we take it as multi-label.
                act = getattr(estimator.model_.layers[-1], 'activation', None)
                if act and act.__name__ == 'softmax':
                    classes = preds.argmax(axis=-1)
                else:
                    preds = (preds > 0.5).astype('int32')
            else:
                classes = (preds > 0.5).astype('int32')

            preds = estimator.classes_[classes]
    # end input

    # output
    if input_type == 'variant_effect':  # TODO: save in batches
        rval = pd.DataFrame(preds)
        meta = pd.DataFrame(
            pred_data_generator.variants,
            columns=['chrom', 'pos', 'name', 'ref', 'alt', 'strand'])

        rval = pd.concat([meta, rval], axis=1)

    elif len(preds.shape) == 1:
        rval = pd.DataFrame(preds, columns=['Predicted'])
    else:
        rval = pd.DataFrame(preds)

    rval.to_csv(outfile_predict, sep='\t', header=True, index=False)
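The class-decoding rule in the variant_effect branch can be read in isolation; a small sketch with made-up probabilities, assuming we already know whether the final layer was a softmax:

import numpy as np

preds = np.array([[0.1, 0.7, 0.2],
                  [0.6, 0.3, 0.1]])
last_activation_is_softmax = True  # assumption about the model's last layer

if preds.shape[-1] > 1:
    if last_activation_is_softmax:
        # rows sum to 1 -> multi-class, take the most probable class
        classes = preds.argmax(axis=-1)
    else:
        # independent sigmoid outputs -> multi-label, threshold each column
        classes = (preds > 0.5).astype('int32')
else:
    classes = (preds > 0.5).astype('int32')

print(classes)  # [1 0]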