def predict(seq, aa_cut=0, percent_peptide=0, model=None, model_file=None):
    assert not (model is None and model_file is None), \
        "you have to specify either a model or a model_file"

    if model is None:
        try:
            with open(model_file, 'rb') as f:
                model, learn_options = pickle.load(f)
        except (IOError, pickle.UnpicklingError):
            raise Exception("could not find model stored to file %s" % model_file)
    else:
        model, learn_options = model

    learn_options["V"] = 2

    # Y, feature_sets, target_genes, learn_options, num_proc = setup(test=False, order=2, learn_options=learn_options, data_file=test_filename)
    # inputs, dim, dimsum, feature_names = pd.concatenate_feature_sets(feature_sets)

    Xdf = pandas.DataFrame(columns=[u'30mer', u'Strand'], data=[[seq, 'NA']])
    gene_position = pandas.DataFrame(columns=[u'Percent Peptide', u'Amino Acid Cut position'],
                                     data=[[percent_peptide, aa_cut]])
    feature_sets = feat.featurize_data(Xdf, learn_options, pandas.DataFrame(), gene_position)
    inputs, dim, dimsum, feature_names = util.concatenate_feature_sets(feature_sets)

    # call to scikit-learn, returns a vector of predicted values
    return model.predict(inputs)[0]
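
# Illustrative usage sketch for the single-sequence predict() above; not part of the
# original module. The pickle path and the 30mer are hypothetical: the file is assumed
# to hold a (model, learn_options) tuple as predict() expects.
def _example_predict_single():
    seq_30mer = "TGGAGGCTGCTTTACCCGCTGTGGGGGCGC"  # made-up 30mer (4nt + 20nt guide + NGG PAM + 3nt)
    score = predict(seq_30mer, aa_cut=100, percent_peptide=50,
                    model_file="saved_models/V3_model_full.pickle")  # hypothetical path
    print("predicted activity: %f" % score)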
def cross_validate(y_all, feature_sets, learn_options=None, TEST=False, train_genes=None, CV=True):
    """
    feature_sets is a dictionary of "set name" to pandas.DataFrame
    one set might be, e.g., single-nucleotide, position-independent features of order X
    Method: "GPy" or "linreg"
    Metric: NDCG (learning-to-rank metric, Normalized Discounted Cumulative Gain); AUC
    Output: cv_score_median, gene_rocs
    """
    allowed_methods = ["GPy", "linreg", "AdaBoostRegressor", "DecisionTreeRegressor",
                       "RandomForestRegressor", "ARDRegression", "GPy_fs", "mean", "random",
                       "DNN", "lasso_ensemble", "doench", "logregL1", "sgrna_from_doench"]
    assert learn_options["method"] in allowed_methods, "invalid method: %s" % learn_options["method"]
    assert (learn_options["method"] == "linreg" and learn_options["penalty"] == "L2") or \
        learn_options["weighted"] is None, "weighted only works with linreg L2 right now"

    # construct filename from options
    filename = construct_filename(learn_options, TEST)

    print "Cross-validating genes..."
    t2 = time.time()

    y = np.array(y_all[learn_options["target_name"]].values[:, None], dtype=np.float64)

    # concatenate feature sets into one nparray, and get the dimension of each
    inputs, dim, dimsum, feature_names = util.concatenate_feature_sets(feature_sets)

    if not CV:
        assert learn_options["cv"] == "gene", "Must use gene-CV when CV is False (I need to use all of the genes, and stratified complicates that)"

    # set up the cross-validation
    # for the outer loop, the one Doench et al. use genes for
    if learn_options["cv"] == "stratified":
        assert not learn_options["extra pairs"], "can't use extra pairs with stratified CV, need to figure out how to properly account for genes affected by two drugs"
        label_encoder = sklearn.preprocessing.LabelEncoder()
        label_encoder.fit(y_all["Target gene"].values)
        gene_classes = label_encoder.transform(y_all["Target gene"].values)
        if learn_options["train_genes"] is not None and learn_options["test_genes"] is not None:
            n_folds = len(learn_options["test_genes"])
        else:
            n_folds = len(learn_options["all_genes"])
        cv = sklearn.cross_validation.StratifiedKFold(gene_classes, n_folds=n_folds, shuffle=True, indices=True)
        fold_labels = ["fold%d" % i for i in range(1, n_folds + 1)]
        if learn_options["num_genes_remove_train"] is not None:
            raise NotImplementedError()
    elif learn_options["cv"] == "gene":
        cv = []
        if not CV:
            train_test_tmp = get_train_test("dummy", y_all)  # get train/test split using a dummy gene
            train_tmp, test_tmp = train_test_tmp
            # not a typo: use the training set to test on as well, just for this case. The test set
            # is not used for internal cross-val, etc. anyway.
            train_test_tmp = (train_tmp, train_tmp)
            cv.append(train_test_tmp)
            fold_labels = learn_options["all_genes"]
        elif learn_options["train_genes"] is not None and learn_options["test_genes"] is not None:
            assert learn_options["train_genes"] is not None and learn_options["test_genes"] is not None, "use both or neither"
            for i, gene in enumerate(learn_options["test_genes"]):
                cv.append(get_train_test(gene, y_all, learn_options["train_genes"]))
            fold_labels = learn_options["test_genes"]
            # if train and test genes are separate, there should be only one fold
            train_test_disjoint = set.isdisjoint(set(learn_options["train_genes"].tolist()),
                                                 set(learn_options["test_genes"].tolist()))
        else:
            for i, gene in enumerate(learn_options["all_genes"]):
                train_test_tmp = get_train_test(gene, y_all)
                cv.append(train_test_tmp)
            fold_labels = learn_options["all_genes"]

        if learn_options["num_genes_remove_train"] is not None:
            for i, (train, test) in enumerate(cv):
                unique_genes = np.random.permutation(np.unique(y_all["Target gene"][train]))
                genes_to_keep = unique_genes[0:len(unique_genes) - learn_options["num_genes_remove_train"]]
                filtered_train = []
                for j, gene in enumerate(y_all["Target gene"]):
                    if j in train and gene in genes_to_keep:
                        filtered_train.append(j)
                cv_i_orig = copy.deepcopy(cv[i])
                cv[i] = (filtered_train, test)
                if learn_options["num_genes_remove_train"] == 0:
                    assert np.all(cv_i_orig[0] == cv[i][0])
                    assert np.all(cv_i_orig[1] == cv[i][1])
                print "# train/train after/before is %s, %s" % (len(cv[i][0]), len(cv_i_orig[0]))
                print "# test/test after/before is %s, %s" % (len(cv[i][1]), len(cv_i_orig[1]))
    else:
        raise Exception("invalid cv options given: %s" % learn_options["cv"])

    cv = [c for c in cv]  # make a list from the generator, so we can subset it for the TEST case

    if TEST:
        ind_to_use = [0]  # [0, 1]
        cv = [cv[i] for i in ind_to_use]
        fold_labels = [fold_labels[i] for i in ind_to_use]

    truth = dict([(t, dict([(m, np.array([])) for m in ["raw", "ranks", "thrs"]])) for t in fold_labels])
    predictions = dict([(t, np.array([])) for t in fold_labels])

    m = {}
    metrics = []

    # do the cross-validation
    num_proc = learn_options["num_proc"]
    if num_proc > 1:
        num_proc = np.min([num_proc, len(cv)])
        print "using multiprocessing with %d procs--one for each fold" % num_proc
        jobs = []
        pool = multiprocessing.Pool(processes=num_proc)
        for i, fold in enumerate(cv):
            train, test = fold
            print "working on fold %d of %d, with %d train and %d test" % (i, len(cv), len(train), len(test))
            # every *_on_fold worker takes the same argument tuple
            fold_args = (feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            if learn_options["method"] == "GPy":
                job = pool.apply_async(models.GP.gp_on_fold, args=fold_args)
            elif learn_options["method"] == "linreg":
                job = pool.apply_async(models.regression.linreg_on_fold, args=fold_args)
            elif learn_options["method"] == "logregL1":
                job = pool.apply_async(models.regression.logreg_on_fold, args=fold_args)
            elif learn_options["method"] == "AdaBoostRegressor":
                job = pool.apply_async(models.ensembles.adaboost_on_fold, args=fold_args)
            elif learn_options["method"] == "DecisionTreeRegressor":
                job = pool.apply_async(models.ensembles.decisiontree_on_fold, args=fold_args)
            elif learn_options["method"] == "RandomForestRegressor":
                job = pool.apply_async(models.ensembles.randomforest_on_fold, args=fold_args)
            elif learn_options["method"] == "ARDRegression":
                job = pool.apply_async(models.regression.ARDRegression_on_fold, args=fold_args)
            elif learn_options["method"] == "random":
                job = pool.apply_async(models.baselines.random_on_fold, args=fold_args)
            elif learn_options["method"] == "mean":
                job = pool.apply_async(models.baselines.mean_on_fold, args=fold_args)
            elif learn_options["method"] == "DNN":
                job = pool.apply_async(models.DNN.DNN_on_fold, args=fold_args)
            elif learn_options["method"] == "lasso_ensemble":
                job = pool.apply_async(models.ensembles.LASSOs_ensemble_on_fold, args=fold_args)
            elif learn_options["method"] == "doench":
                job = pool.apply_async(models.baselines.doench_on_fold, args=fold_args)
            elif learn_options["method"] == "sgrna_from_doench":
                job = pool.apply_async(models.baselines.sgrna_from_doench_on_fold, args=fold_args)
            else:
                raise Exception("did not find method=%s" % learn_options["method"])
            jobs.append(job)
        pool.close()
        pool.join()

        for i, fold in enumerate(cv):
            y_pred, m[i] = jobs[i].get()
            train, test = fold
            if learn_options["training_metric"] == "AUC":
                extract_fpr_tpr_for_fold(metrics, fold_labels[i], i, predictions, truth,
                                         y_all[learn_options["ground_truth_label"]].values, test, y_pred)
            elif learn_options["training_metric"] == "NDCG":
                extract_NDCG_for_fold(metrics, fold_labels[i], i, predictions, truth,
                                      y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            elif learn_options["training_metric"] == "spearmanr":
                extract_spearman_for_fold(metrics, fold_labels[i], i, predictions, truth,
                                          y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            else:
                raise Exception("invalid 'training_metric' in learn_options: %s" % learn_options["training_metric"])
            truth, predictions = fill_in_truth_and_predictions(truth, predictions, fold_labels[i],
                                                               y_all, y_pred, learn_options, test)
        pool.terminate()
    else:
        # non-parallel version
        for i, fold in enumerate(cv):
            train, test = fold
            fold_args = (feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            if learn_options["method"] == "GPy":
                y_pred, m[i] = models.GP.gp_on_fold(*fold_args)
            elif learn_options["method"] == "linreg":
                y_pred, m[i] = models.regression.linreg_on_fold(*fold_args)
            elif learn_options["method"] == "logregL1":
                y_pred, m[i] = models.regression.logreg_on_fold(*fold_args)
            elif learn_options["method"] == "AdaBoostRegressor":
                y_pred, m[i] = models.ensembles.adaboost_on_fold(*fold_args)
            elif learn_options["method"] == "DecisionTreeRegressor":
                y_pred, m[i] = models.ensembles.decisiontree_on_fold(*fold_args)
            elif learn_options["method"] == "RandomForestRegressor":
                y_pred, m[i] = models.ensembles.randomforest_on_fold(*fold_args)
            elif learn_options["method"] == "ARDRegression":
                y_pred, m[i] = models.regression.ARDRegression_on_fold(*fold_args)
            elif learn_options["method"] == "GPy_fs":
                y_pred, m[i] = models.GP.gp_with_fs_on_fold(*fold_args)
            elif learn_options["method"] == "random":
                y_pred, m[i] = models.baselines.random_on_fold(*fold_args)
            elif learn_options["method"] == "mean":
                y_pred, m[i] = models.baselines.mean_on_fold(*fold_args)
            elif learn_options["method"] == "DNN":
                y_pred, m[i] = models.DNN.DNN_on_fold(*fold_args)
            elif learn_options["method"] == "lasso_ensemble":
                y_pred, m[i] = models.ensembles.LASSOs_ensemble_on_fold(*fold_args)
            elif learn_options["method"] == "doench":
                y_pred, m[i] = models.baselines.doench_on_fold(*fold_args)
            elif learn_options["method"] == "sgrna_from_doench":
                y_pred, m[i] = models.baselines.sgrna_from_doench_on_fold(*fold_args)
            else:
                raise Exception("invalid method found: %s" % learn_options["method"])

            if learn_options["training_metric"] == "AUC":
                # fills in truth and predictions
                extract_fpr_tpr_for_fold(metrics, fold_labels[i], i, predictions, truth,
                                         y_all[learn_options["ground_truth_label"]].values, test, y_pred)
            elif learn_options["training_metric"] == "NDCG":
                extract_NDCG_for_fold(metrics, fold_labels[i], i, predictions, truth,
                                      y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            elif learn_options["training_metric"] == "spearmanr":
                extract_spearman_for_fold(metrics, fold_labels[i], i, predictions, truth,
                                          y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            truth, predictions = fill_in_truth_and_predictions(truth, predictions, fold_labels[i],
                                                               y_all, y_pred, learn_options, test)

            print "\t\tRMSE: ", np.sqrt(((y_pred - y[test]) ** 2).mean())
            print "\t\tSpearman correlation: ", util.spearmanr_nonan(y[test], y_pred)[0]
            print "\t\tfinished fold/gene %i of %i" % (i + 1, len(fold_labels))

    cv_median_metric = [np.median(metrics)]
    gene_pred = [(truth, predictions)]
    print "\t\tmedian %s across gene folds: %.3f" % (learn_options["training_metric"], cv_median_metric[-1])

    t3 = time.time()
    print "\t\tElapsed time for cv is %.2f seconds" % (t3 - t2)
    return metrics, gene_pred, fold_labels, m, dimsum, filename, feature_names
def cross_validate(y_all, feature_sets, learn_options=None, TEST=False, train_genes=None, CV=True):
    """
    feature_sets is a dictionary of "set name" to pandas.DataFrame
    one set might be, e.g., single-nucleotide, position-independent features of order X
    Method: "GPy" or "linreg"
    Metric: NDCG (learning-to-rank metric, Normalized Discounted Cumulative Gain); AUC
    Output: cv_score_median, gene_rocs
    When CV=False, it trains on everything (and tests on everything, just to fit the code)
    """
    print "range of y_all is [%f, %f]" % (np.min(y_all[learn_options["target_name"]].values),
                                          np.max(y_all[learn_options["target_name"]].values))

    allowed_methods = ["GPy", "linreg", "AdaBoostRegressor", "AdaBoostClassifier",
                       "DecisionTreeRegressor", "RandomForestRegressor", "ARDRegression",
                       "GPy_fs", "mean", "random", "DNN", "lasso_ensemble", "doench",
                       "logregL1", "sgrna_from_doench", "SVC", "xu_et_al"]
    assert learn_options["method"] in allowed_methods, "invalid method: %s" % learn_options["method"]
    assert (learn_options["method"] == "linreg" and learn_options["penalty"] == "L2") or \
        learn_options["weighted"] is None, "weighted only works with linreg L2 right now"

    # construct filename from options
    filename = construct_filename(learn_options, TEST)

    print "Cross-validating genes..."
    t2 = time.time()

    y = np.array(y_all[learn_options["target_name"]].values[:, None], dtype=np.float64)

    # concatenate feature sets into one nparray, and get the dimension of each
    inputs, dim, dimsum, feature_names = util.concatenate_feature_sets(feature_sets)

    # import pickle; pickle.dump([y, inputs, feature_names, learn_options], open("saved_models/inputs.p", "wb")); import ipdb; ipdb.set_trace()

    if not CV:
        assert learn_options["cv"] == "gene", "Must use gene-CV when CV is False (I need to use all of the genes, and stratified complicates that)"

    # set up the cross-validation
    # for the outer loop, the one Doench et al. use genes for
    if learn_options["cv"] == "stratified":
        # extra pairs are not supported with stratified CV
        assert "extra pairs" not in learn_options or not learn_options["extra pairs"], \
            "can't use extra pairs with stratified CV, need to figure out how to properly account for genes affected by two drugs"
        label_encoder = sklearn.preprocessing.LabelEncoder()
        label_encoder.fit(y_all["Target gene"].values)
        gene_classes = label_encoder.transform(y_all["Target gene"].values)
        if "n_folds" in learn_options:
            n_splits = learn_options["n_folds"]
        elif learn_options["train_genes"] is not None and learn_options["test_genes"] is not None:
            n_splits = len(learn_options["test_genes"])
        else:
            n_splits = len(learn_options["all_genes"])
        skf = sklearn.model_selection.StratifiedKFold(n_splits=n_splits, shuffle=True)
        cv = skf.split(np.zeros(len(gene_classes), dtype=bool), gene_classes)
        fold_labels = ["fold%d" % i for i in range(1, n_splits + 1)]
        if learn_options["num_genes_remove_train"] is not None:
            raise NotImplementedError()
    elif learn_options["cv"] == "gene":
        cv = []
        if not CV:
            train_test_tmp = get_train_test("dummy", y_all)  # get train/test split using a dummy gene
            # train_tmp, test_tmp = train_test_tmp
            # not a typo: use the training set to test on as well, just for this case. The test set
            # is not used for internal cross-val, etc. anyway.
            # train_test_tmp = (train_tmp, train_tmp)
            cv.append(train_test_tmp)
            fold_labels = ["dummy_for_no_cv"]  # learn_options['all_genes']
        elif learn_options["train_genes"] is not None and learn_options["test_genes"] is not None:
            assert learn_options["train_genes"] is not None and learn_options["test_genes"] is not None, "use both or neither"
            for i, gene in enumerate(learn_options["test_genes"]):
                cv.append(get_train_test(gene, y_all, learn_options["train_genes"]))
            fold_labels = learn_options["test_genes"]
            # if train and test genes are separate, there should be only one fold
            train_test_disjoint = set.isdisjoint(set(learn_options["train_genes"].tolist()),
                                                 set(learn_options["test_genes"].tolist()))
        else:
            for i, gene in enumerate(learn_options["all_genes"]):
                train_test_tmp = get_train_test(gene, y_all)
                cv.append(train_test_tmp)
            fold_labels = learn_options["all_genes"]

        if learn_options["num_genes_remove_train"] is not None:
            for i, (train, test) in enumerate(cv):
                unique_genes = np.random.permutation(np.unique(y_all["Target gene"][train]))
                genes_to_keep = unique_genes[0:len(unique_genes) - learn_options["num_genes_remove_train"]]
                filtered_train = []
                for j, gene in enumerate(y_all["Target gene"]):
                    if j in train and gene in genes_to_keep:
                        filtered_train.append(j)
                cv_i_orig = copy.deepcopy(cv[i])
                cv[i] = (filtered_train, test)
                if learn_options["num_genes_remove_train"] == 0:
                    assert np.all(cv_i_orig[0] == cv[i][0])
                    assert np.all(cv_i_orig[1] == cv[i][1])
                print "# train/train after/before is %s, %s" % (len(cv[i][0]), len(cv_i_orig[0]))
                print "# test/test after/before is %s, %s" % (len(cv[i][1]), len(cv_i_orig[1]))
    else:
        raise Exception("invalid cv options given: %s" % learn_options["cv"])

    cv = [c for c in cv]  # make a list from the generator, so we can subset it for the TEST case

    if TEST:
        ind_to_use = [0]  # [0, 1]
        cv = [cv[i] for i in ind_to_use]
        fold_labels = [fold_labels[i] for i in ind_to_use]

    truth = dict([(t, dict([(m, np.array([])) for m in ["raw", "ranks", "thrs"]])) for t in fold_labels])
    predictions = dict([(t, np.array([])) for t in fold_labels])

    m = {}
    metrics = []

    # do the cross-validation
    num_proc = learn_options["num_proc"]
    if num_proc > 1:
        num_proc = np.min([num_proc, len(cv)])
        print "using multiprocessing with %d procs--one for each fold" % num_proc
        jobs = []
        pool = multiprocessing.Pool(processes=num_proc)
        for i, fold in enumerate(cv):
            train, test = fold
            print "working on fold %d of %d, with %d train and %d test" % (i, len(cv), len(train), len(test))
            # every *_on_fold worker takes this tuple; adaboost additionally takes a classification flag
            fold_args = (feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            if learn_options["method"] == "GPy":
                job = pool.apply_async(azimuth.models.GP.gp_on_fold, args=fold_args)
            elif learn_options["method"] == "linreg":
                job = pool.apply_async(azimuth.models.regression.linreg_on_fold, args=fold_args)
            elif learn_options["method"] == "logregL1":
                job = pool.apply_async(azimuth.models.regression.logreg_on_fold, args=fold_args)
            elif learn_options["method"] == "AdaBoostRegressor":
                job = pool.apply_async(azimuth.models.ensembles.adaboost_on_fold, args=fold_args + (False,))
            elif learn_options["method"] == "AdaBoostClassifier":
                job = pool.apply_async(azimuth.models.ensembles.adaboost_on_fold, args=fold_args + (True,))
            elif learn_options["method"] == "DecisionTreeRegressor":
                job = pool.apply_async(azimuth.models.ensembles.decisiontree_on_fold, args=fold_args)
            elif learn_options["method"] == "RandomForestRegressor":
                job = pool.apply_async(azimuth.models.ensembles.randomforest_on_fold, args=fold_args)
            elif learn_options["method"] == "ARDRegression":
                job = pool.apply_async(azimuth.models.regression.ARDRegression_on_fold, args=fold_args)
            elif learn_options["method"] == "random":
                job = pool.apply_async(azimuth.models.baselines.random_on_fold, args=fold_args)
            elif learn_options["method"] == "mean":
                job = pool.apply_async(azimuth.models.baselines.mean_on_fold, args=fold_args)
            elif learn_options["method"] == "SVC":
                job = pool.apply_async(azimuth.models.baselines.SVC_on_fold, args=fold_args)
            elif learn_options["method"] == "DNN":
                job = pool.apply_async(azimuth.models.DNN.DNN_on_fold, args=fold_args)
            elif learn_options["method"] == "lasso_ensemble":
                job = pool.apply_async(azimuth.models.ensembles.LASSOs_ensemble_on_fold, args=fold_args)
            elif learn_options["method"] == "doench":
                job = pool.apply_async(azimuth.models.baselines.doench_on_fold, args=fold_args)
            elif learn_options["method"] == "sgrna_from_doench":
                job = pool.apply_async(azimuth.models.baselines.sgrna_from_doench_on_fold, args=fold_args)
            elif learn_options["method"] == "xu_et_al":
                job = pool.apply_async(azimuth.models.baselines.xu_et_al_on_fold, args=fold_args)
            else:
                raise Exception("did not find method=%s" % learn_options["method"])
            jobs.append(job)
        pool.close()
        pool.join()

        for i, fold in enumerate(cv):
            y_pred, m[i] = jobs[i].get()
            train, test = fold
            if learn_options["training_metric"] == "AUC":
                extract_fpr_tpr_for_fold(metrics, fold_labels[i], i, predictions, truth,
                                         y_all[learn_options["ground_truth_label"]].values, test, y_pred)
            elif learn_options["training_metric"] == "NDCG":
                extract_NDCG_for_fold(metrics, fold_labels[i], i, predictions, truth,
                                      y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            elif learn_options["training_metric"] == "spearmanr":
                extract_spearman_for_fold(metrics, fold_labels[i], i, predictions, truth,
                                          y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            else:
                raise Exception("invalid 'training_metric' in learn_options: %s" % learn_options["training_metric"])
            truth, predictions = fill_in_truth_and_predictions(truth, predictions, fold_labels[i],
                                                               y_all, y_pred, learn_options, test)
        pool.terminate()
    else:
        # non-parallel version
        for i, fold in enumerate(cv):
            train, test = fold
            fold_args = (feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            if learn_options["method"] == "GPy":
                y_pred, m[i] = azimuth.models.GP.gp_on_fold(*fold_args)
            elif learn_options["method"] == "linreg":
                y_pred, m[i] = azimuth.models.regression.linreg_on_fold(*fold_args)
            elif learn_options["method"] == "logregL1":
                y_pred, m[i] = azimuth.models.regression.logreg_on_fold(*fold_args)
            elif learn_options["method"] == "AdaBoostRegressor":
                y_pred, m[i] = azimuth.models.ensembles.adaboost_on_fold(*fold_args, classification=False)
            elif learn_options["method"] == "AdaBoostClassifier":
                y_pred, m[i] = azimuth.models.ensembles.adaboost_on_fold(*fold_args, classification=True)
            elif learn_options["method"] == "DecisionTreeRegressor":
                y_pred, m[i] = azimuth.models.ensembles.decisiontree_on_fold(*fold_args)
            elif learn_options["method"] == "RandomForestRegressor":
                y_pred, m[i] = azimuth.models.ensembles.randomforest_on_fold(*fold_args)
            elif learn_options["method"] == "ARDRegression":
                y_pred, m[i] = azimuth.models.regression.ARDRegression_on_fold(*fold_args)
            elif learn_options["method"] == "GPy_fs":
                y_pred, m[i] = azimuth.models.GP.gp_with_fs_on_fold(*fold_args)
            elif learn_options["method"] == "random":
                y_pred, m[i] = azimuth.models.baselines.random_on_fold(*fold_args)
            elif learn_options["method"] == "mean":
                y_pred, m[i] = azimuth.models.baselines.mean_on_fold(*fold_args)
            elif learn_options["method"] == "SVC":
                y_pred, m[i] = azimuth.models.baselines.SVC_on_fold(*fold_args)
            elif learn_options["method"] == "DNN":
                y_pred, m[i] = azimuth.models.DNN.DNN_on_fold(*fold_args)
            elif learn_options["method"] == "lasso_ensemble":
                y_pred, m[i] = azimuth.models.ensembles.LASSOs_ensemble_on_fold(*fold_args)
            elif learn_options["method"] == "doench":
                y_pred, m[i] = azimuth.models.baselines.doench_on_fold(*fold_args)
            elif learn_options["method"] == "sgrna_from_doench":
                y_pred, m[i] = azimuth.models.baselines.sgrna_from_doench_on_fold(*fold_args)
            elif learn_options["method"] == "xu_et_al":
                y_pred, m[i] = azimuth.models.baselines.xu_et_al_on_fold(*fold_args)
            else:
                raise Exception("invalid method found: %s" % learn_options["method"])

            if learn_options["training_metric"] == "AUC":
                # fills in truth and predictions
                extract_fpr_tpr_for_fold(metrics, fold_labels[i], i, predictions, truth,
                                         y_all[learn_options["ground_truth_label"]].values, test, y_pred)
            elif learn_options["training_metric"] == "NDCG":
                extract_NDCG_for_fold(metrics, fold_labels[i], i, predictions, truth,
                                      y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            elif learn_options["training_metric"] == "spearmanr":
                extract_spearman_for_fold(metrics, fold_labels[i], i, predictions, truth,
                                          y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            truth, predictions = fill_in_truth_and_predictions(truth, predictions, fold_labels[i],
                                                               y_all, y_pred, learn_options, test)

            print "\t\tRMSE: ", np.sqrt(((y_pred - y[test]) ** 2).mean())
            print "\t\tSpearman correlation: ", util.spearmanr_nonan(y[test], y_pred)[0]
            print "\t\tfinished fold/gene %i of %i" % (i + 1, len(fold_labels))

    cv_median_metric = [np.median(metrics)]
    gene_pred = [(truth, predictions)]
    print "\t\tmedian %s across gene folds: %.3f" % (learn_options["training_metric"], cv_median_metric[-1])

    t3 = time.time()
    print "\t\tElapsed time for cv is %.2f seconds" % (t3 - t2)
    return metrics, gene_pred, fold_labels, m, dimsum, filename, feature_names
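
# Illustrative sketch for the updated cross_validate() above; not from the original
# source. Relative to the earlier version, the main additions it exercises are the
# optional 'n_folds' override for stratified CV (sklearn.model_selection.StratifiedKFold)
# and the new methods such as "AdaBoostClassifier", "SVC", and "xu_et_al". Values are
# hypothetical; learn_options is assumed to already be a valid options dict.
def _example_cross_validate_stratified(y_all, feature_sets, learn_options):
    learn_options = dict(learn_options)                 # copy, then override a few keys
    learn_options["cv"] = "stratified"                  # stratify folds on 'Target gene'
    learn_options["n_folds"] = 10                       # if absent, falls back to len(test_genes) or len(all_genes)
    learn_options["num_genes_remove_train"] = None      # must be None: the stratified path raises otherwise
    return cross_validate(y_all, feature_sets, learn_options=learn_options, CV=True)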
    proximal_5mer_counts = proximal_5mer.groupby(["proximal_5mers"]).size().reset_index()
    proximal_5mer = proximal_5mer.merge(proximal_5mer_counts, on="proximal_5mers")
    proximal_5mer = proximal_5mer.rename(columns={0: 'proximal_5mer_counts'})
    return proximal_5mer


if __name__ == '__main__':
    feature_df = pd.read_csv("../../../../../results/cleaned_c_elegans_30mers_energies.csv")
    features = featurize_data(feature_df, learn_options=learn_options, Y=feature_df, gene_position=feature_df)
    features['proximal_5mer'] = get_proximal_5mer_feature(feature_df)
    inputs, dim, dimsum, feature_names = concatenate_feature_sets(features)
    doensch_df = pd.DataFrame(inputs, columns=feature_names)
    feature_df = feature_df.join(doensch_df)
    feature_df = feature_df.drop(axis=1, labels=['sgRNA', 'Gene target', '30mer',
                                                 'WormsInjected', 'SuccessfulInjections'])
    feature_df = pd.get_dummies(feature_df).dropna(axis=0)
    if any(feature_df.columns.duplicated()):
        feature_df = feature_df.loc[:, ~feature_df.columns.duplicated()]
    feature_df = feature_df.rename(columns={"SuccessRate": "target"})
    print(feature_df.shape)
    # move the 'target' column to the end
    cols = feature_df.columns.tolist()
    cols.append(cols.pop(cols.index('target')))
    feature_df = feature_df.reindex(columns=cols)
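
# Minimal standalone illustration of the column-reordering idiom used above (moving the
# label column to the end of the frame); the toy data is not part of the original script.
def _example_move_target_last():
    df = pd.DataFrame({"a": [1, 2], "target": [0.5, 0.9], "b": [3, 4]})
    cols = df.columns.tolist()
    cols.append(cols.pop(cols.index("target")))  # pop 'target' and re-append it at the end
    return df.reindex(columns=cols)              # columns are now: a, b, target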
def predict(seq, aa_cut=-1, percent_peptide=-1, model=None, model_file=None, pam_audit=True,
            length_audit=False, learn_options_override=None):
    """
    If pam_audit==False, it will not check for GG in the expected position;
    this is useful when predicting on PAM mismatches, such as with off-targets.
    """
    print "predict function running"

    # assert not (model is None and model_file is None), "you have to specify either a model or a model_file"
    assert isinstance(seq, np.ndarray), "Please ensure seq is a numpy array"
    assert len(seq[0]) > 0, "Make sure that seq is not empty"
    assert isinstance(seq[0], str), ("Please ensure input sequences are in string format, "
                                     "i.e. 'AGAG' rather than ['A' 'G' 'A' 'G'] or alternate representations")

    if aa_cut is not None:
        assert len(aa_cut) > 0, "Make sure that aa_cut is not empty"
        assert isinstance(aa_cut, np.ndarray), "Please ensure aa_cut is a numpy array"
        assert np.all(np.isreal(aa_cut)), "amino-acid cut position needs to be a real number"

    if percent_peptide is not None:
        assert len(percent_peptide) > 0, "Make sure that percent_peptide is not empty"
        assert isinstance(percent_peptide, np.ndarray), "Please ensure percent_peptide is a numpy array"
        assert np.all(np.isreal(percent_peptide)), "percent_peptide needs to be a real number"

    if model_file is None:
        azimuth_saved_model_dir = os.path.join(os.path.dirname(__file__), 'saved_models')
        if np.any(percent_peptide == -1) or (percent_peptide is None and aa_cut is None):
            print("No model file specified, using V3_model_nopos")
            model_name = 'V3_model_nopos.pickle'
        else:
            print("No model file specified, using V3_model_full")
            model_name = 'V3_model_full.pickle'
        model_file = os.path.join(azimuth_saved_model_dir, model_name)

    if model is None:
        with open(model_file, 'rb') as f:
            model, learn_options = pickle.load(f)
        print model_file
        print learn_options
    else:
        model, learn_options = model

    learn_options["V"] = 2
    learn_options = override_learn_options(learn_options_override, learn_options)

    # Y, feature_sets, target_genes, learn_options, num_proc = setup(test=False, order=2, learn_options=learn_options, data_file=test_filename)
    # inputs, dim, dimsum, feature_names = pd.concatenate_feature_sets(feature_sets)

    Xdf = pandas.DataFrame(columns=[u'30mer', u'Strand'],
                           data=zip(seq, ['NA' for x in range(len(seq))]))

    if np.all(percent_peptide != -1) and (percent_peptide is not None and aa_cut is not None):
        gene_position = pandas.DataFrame(columns=[u'Percent Peptide', u'Amino Acid Cut position'],
                                         data=zip(percent_peptide, aa_cut))
    else:
        gene_position = pandas.DataFrame(columns=[u'Percent Peptide', u'Amino Acid Cut position'],
                                         data=zip(np.ones(seq.shape[0]) * -1, np.ones(seq.shape[0]) * -1))

    feature_sets = feat.featurize_data(Xdf, learn_options, pandas.DataFrame(), gene_position,
                                       pam_audit=pam_audit, length_audit=length_audit)
    inputs, dim, dimsum, feature_names = util.concatenate_feature_sets(feature_sets)

    # call to scikit-learn, returns a vector of predicted values
    preds = model.predict(inputs)

    # also check that the predictions are not hard 0/1 labels from a classifier's
    # .predict() (rather than predict_proba() or decision_function())
    ok = False
    for pr in preds:
        if pr not in [0, 1]:
            ok = True
            break
    assert ok, "model returned only 0s and 1s"
    return preds
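
# Illustrative usage sketch for the array-based predict() above; not part of the
# original module. Sequences must be a numpy array of 30mer strings. The 30mers below
# are made up; passing aa_cut=None and percent_peptide=None selects the
# position-independent (nopos) model, and pam_audit=False skips the GG-position check
# so the toy sequences do not need a valid PAM.
def _example_predict_batch():
    seqs = np.array(["TGGAGGCTGCTTTACCCGCTGTGGGGGCGC",
                     "ACAGCTGATCTCCAGATATGACCATGGGTT"])
    scores = predict(seqs, aa_cut=None, percent_peptide=None, pam_audit=False)
    for s, sc in zip(seqs, scores):
        print("%s -> %f" % (s, sc))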