def train(self, data, verbose=True):
    """
    Train all models and return the best one.

    Models are evaluated and ranked according to their ROC-AUC on a
    validation data set.

    Parameters
    ----------
    data: pysster.Data
        A Data object providing training and validation data sets.

    verbose: bool
        If True, progress information (train/val loss) will be printed
        throughout the training.

    Returns
    -------
    results: tuple(pysster.Model, str)
        The best performing model and an overview table of all models are
        returned.
    """
    best_model_path = "{}/{}".format(
        gettempdir(),
        ''.join(random.choice(string.ascii_uppercase) for _ in range(20)))
    aucs = []
    max_auroc = -1
    for i, candidate in enumerate(self.candidates):
        model = Model(candidate, data)
        model.train(data, verbose)
        predictions = model.predict(data, "val")
        labels = data.get_labels("val")
        report = utils.performance_report(labels, predictions)
        # class-weighted average ROC-AUC (metric column 3 of the report,
        # weighted by the per-class support in the last column)
        roc_auc = np.sum(report[:, 0:-1] * report[:, -1, np.newaxis], axis=0)
        roc_auc = (roc_auc / np.sum(report[:, -1]))[3]
        aucs.append(roc_auc)
        if aucs[-1] > max_auroc:
            max_auroc = aucs[-1]
            utils.save_model(model, best_model_path)
        K.clear_session()
        K.reset_uids()
        if not verbose:
            continue
        print("\n=== Summary ===")
        print("Model {}/{} = {:.5f} weighted avg roc-auc".format(
            i + 1, len(self.candidates), aucs[i]))
        for param in candidate:
            if param not in ["input_shape"]:
                print(" - {}: {}".format(param, candidate[param]))
    # load the best model (and remove it from disc)
    model = utils.load_model(best_model_path)
    remove(best_model_path)
    remove("{}.h5".format(best_model_path))
    # save a formatted summary of all trained models
    table = self._grid_search_table(aucs)
    return model, table
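# Usage sketch (illustrative, not part of the library source): driving the
# grid search above from user code. The file names and the parameter grid are
# assumptions; Grid_Search takes a dict mapping Model hyperparameters to lists
# of candidate values.
from pysster.Data import Data
from pysster.Grid_Search import Grid_Search

data = Data(["positives.fasta", "negatives.fasta"], "ACGU")  # hypothetical files
data.train_val_test_split(0.7, 0.15)
searcher = Grid_Search({"kernel_len": [10, 20], "kernel_num": [20, 50]})
best_model, table = searcher.train(data)  # best model and a summary of all candidates
print(table)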
def measure_rbp(entry):
    import os
    from time import time
    from pysster import utils
    output_folder = entry[4] + "_pysster/"
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
    start = time()
    # predict secondary structures
    utils.predict_structures(entry[0], entry[0] + ".struct", annotate=True)
    utils.predict_structures(entry[1], entry[1] + ".struct", annotate=True)
    utils.predict_structures(entry[2], entry[2] + ".struct", annotate=True)
    utils.predict_structures(entry[3], entry[3] + ".struct", annotate=True)
    from pysster.Data import Data
    from pysster.Model import Model
    # load data
    data = Data([entry[0] + ".struct", entry[1] + ".struct"], ("ACGU", "HIMS"))
    # we need to have at least one test sequence, even though we have a separate test object
    data.train_val_test_split(0.8, 0.1999)
    # training
    params = {"kernel_len": 8}
    model = Model(params, data)
    model.train(data)
    # load and predict test data
    data_test = Data([entry[2] + ".struct", entry[3] + ".struct"], ("ACGU", "HIMS"))
    predictions = model.predict(data_test, "all")
    stop = time()
    print("{}, time in seconds: {}".format(entry[4], stop - start))
    # performance evaluation
    labels = data_test.get_labels("all")
    utils.plot_roc(labels, predictions, output_folder + "roc.pdf")
    utils.plot_prec_recall(labels, predictions, output_folder + "prec.pdf")
    # get motifs
    activations = model.get_max_activations(data_test, "all")
    _ = model.visualize_all_kernels(activations, data_test, output_folder)
    # save model to drive
    utils.save_model(model, "{}model.pkl".format(output_folder))
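# Minimal sketch of how measure_rbp() above could be invoked. The FASTA paths
# follow the tuple layout the function indexes into: (train positives,
# train negatives, test positives, test negatives, protein name).
entry = ("data/pum2.train.positive.fasta", "data/pum2.train.negative.fasta",
         "data/pum2.test.positive.fasta", "data/pum2.test.negative.fasta", "PUM2")
measure_rbp(entry)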
def test_data_additional(self):
    self.assertTrue(len(self.data_pwm.meta) == 2)
    self.assertTrue(self.data_pwm.meta[0]['is_categorical'] == False)
    self.assertTrue(self.data_pwm.meta[1]['is_categorical'] == True)
    self.assertTrue(self.data_pwm.meta[0]['data'] == [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
        16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
    ])
    self.assertTrue(len(self.data_pwm.meta[1]['data']) == 32)
    for x in self.data_pwm.meta[1]['data']:
        self.assertTrue(sum(x) == 1)
    self.assertTrue((self.data_pwm.meta[1]['data'][0] ==
                     self.data_pwm.meta[1]['data'][31]).all())
    self.assertTrue((self.data_pwm.meta[1]['data'][13] ==
                     self.data_pwm.meta[1]['data'][18]).all())
    addi = self.data_pwm._get_additional_data([0, 1, 15, 16], 0, 4)
    self.assertTrue(len(addi) == 4)
    self.assertTrue(np.allclose(addi[0], [1, *self.data_pwm.meta[1]['data'][0]]))
    self.assertTrue(np.allclose(addi[1], [2, *self.data_pwm.meta[1]['data'][1]]))
    self.assertTrue(np.allclose(addi[2], [16, *self.data_pwm.meta[1]['data'][15]]))
    self.assertTrue(np.allclose(addi[3], [16, *self.data_pwm.meta[1]['data'][16]]))
    mod = Model({"conv_num": 1, "kernel_num": 2, "kernel_len": 4,
                 "neuron_num": 2, "epochs": 2}, self.data_pwm)
    mod.train(self.data_pwm, verbose=True)
    predictions = mod.predict(self.data_pwm, "all")
    self.assertTrue(predictions.shape == (32, 2))
def train(self, data, pr_auc=False, verbose=True):
    """
    Train all models and return the best one.

    Models are evaluated and ranked according to their ROC-AUC or PR-AUC
    (precision-recall) on a validation data set.

    Parameters
    ----------
    data: pysster.Data
        A Data object providing training and validation data sets.

    pr_auc: bool
        If True, the area under the precision-recall curve will be maximized
        instead of the area under the ROC curve.

    verbose: bool
        If True, progress information (train/val loss) will be printed
        throughout the training.

    Returns
    -------
    results: tuple(pysster.Model, str)
        The best performing model and an overview table of all models are
        returned.
    """
    best_model_path = "{}/{}".format(
        gettempdir(),
        ''.join(random.choice(string.ascii_uppercase) for _ in range(20)))
    if pr_auc:
        metric_idx = 4
        metric_name = "pr-auc"
    else:
        metric_idx = 3
        metric_name = "roc-auc"
    metric = []
    max_metric = -1
    for i, candidate in enumerate(self.candidates):
        model = Model(candidate, data)
        model.train(data, verbose)
        predictions = model.predict(data, "val")
        labels = data.get_labels("val")
        report = utils.performance_report(labels, predictions)
        # class-weighted average of the selected metric column
        metric_val = np.sum(report[:, 0:-1] * report[:, -1, np.newaxis], axis=0)
        metric_val = (metric_val / np.sum(report[:, -1]))[metric_idx]
        metric.append(metric_val)
        if metric[-1] > max_metric:
            max_metric = metric[-1]
            utils.save_model(model, best_model_path)
        K.clear_session()
        K.reset_uids()
        if not verbose:
            continue
        print("\n=== Summary ===")
        print("Model {}/{} = {:.5f} weighted avg {}".format(
            i + 1, len(self.candidates), metric[i], metric_name))
        for param in candidate:
            if param not in ["input_shape"]:
                print(" - {}: {}".format(param, candidate[param]))
    # load the best model (and remove it from disc)
    model = utils.load_model(best_model_path)
    remove(best_model_path)
    remove("{}.h5".format(best_model_path))
    # save a formatted summary of all trained models
    table = self._grid_search_table(metric, metric_name)
    return model, table
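# Illustration of the support-weighted averaging used in train() above,
# assuming performance_report() returns one row per class with metric columns
# first and the class support (number of true instances) in the last column.
# The numbers here are made up for demonstration.
import numpy as np

report = np.array([[0.90, 0.80, 0.85, 0.95, 0.92, 30],   # class 0 (support 30)
                   [0.70, 0.60, 0.65, 0.85, 0.80, 10]])  # class 1 (support 10)
weighted = np.sum(report[:, :-1] * report[:, -1, np.newaxis], axis=0)
weighted /= np.sum(report[:, -1])
print(weighted[3])  # support-weighted ROC-AUC: (0.95*30 + 0.85*10) / 40 = 0.925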
import os
from time import time

from pysster import utils
from pysster.Data import Data
from pysster.Model import Model


def main():
    RBPs = [("data/pum2.train.positive.fasta", "data/pum2.train.negative.fasta",
             "data/pum2.test.positive.fasta", "data/pum2.test.negative.fasta", "PUM2"),
            ("data/qki.train.positive.fasta", "data/qki.train.negative.fasta",
             "data/qki.test.positive.fasta", "data/qki.test.negative.fasta", "QKI"),
            ("data/igf2bp123.train.positive.fasta", "data/igf2bp123.train.negative.fasta",
             "data/igf2bp123.test.positive.fasta", "data/igf2bp123.test.negative.fasta", "IGF2BP123"),
            ("data/srsf1.train.positive.fasta", "data/srsf1.train.negative.fasta",
             "data/srsf1.test.positive.fasta", "data/srsf1.test.negative.fasta", "SRSF1"),
            ("data/taf2n.train.positive.fasta", "data/taf2n.train.negative.fasta",
             "data/taf2n.test.positive.fasta", "data/taf2n.test.negative.fasta", "TAF2N"),
            ("data/nova.train.positive.fasta", "data/nova.train.negative.fasta",
             "data/nova.test.positive.fasta", "data/nova.test.negative.fasta", "NOVA")]
    for entry in RBPs:
        output_folder = entry[4] + "_pysster/"
        if not os.path.isdir(output_folder):
            os.makedirs(output_folder)
        start = time()
        # predict secondary structures
        utils.predict_structures(entry[0], entry[0] + ".struct.gz", annotate=True)
        utils.predict_structures(entry[1], entry[1] + ".struct.gz", annotate=True)
        utils.predict_structures(entry[2], entry[2] + ".struct.gz", annotate=True)
        utils.predict_structures(entry[3], entry[3] + ".struct.gz", annotate=True)
        # load data
        data = Data([entry[0] + ".struct.gz", entry[1] + ".struct.gz"], ("ACGU", "HIMS"))
        # we need at least one test sequence, even though we won't use it here
        data.train_val_test_split(0.8, 0.1999)
        print(data.get_summary())
        # training
        params = {"kernel_len": 8}
        model = Model(params, data)
        model.train(data)
        # load and predict test data
        data_test = Data([entry[2] + ".struct.gz", entry[3] + ".struct.gz"], ("ACGU", "HIMS"))
        predictions = model.predict(data_test, "all")
        stop = time()
        print("{}, time in seconds: {}".format(entry[4], stop - start))
        # performance evaluation
        labels = data_test.get_labels("all")
        utils.plot_roc(labels, predictions, output_folder + "roc.pdf")
        utils.plot_prec_recall(labels, predictions, output_folder + "prec.pdf")
        print(utils.get_performance_report(labels, predictions))
        # get motifs
        activations = model.get_max_activations(data_test, "all")
        logos, scores = [], []
        for kernel in range(model.params["kernel_num"]):
            logo, score = model.visualize_kernel(activations, data_test, kernel, output_folder)
            logos.append(logo)
            scores.append(score)
        # sort motifs by importance score
        sorted_idx = [i[0] for i in sorted(enumerate(scores), key=lambda x: x[1])]
        with open(output_folder + "kernel_scores.txt", "wt") as handle:
            for x in sorted_idx:
                print("kernel {:>3}: {:.3f}".format(x, scores[x]))
                handle.write("kernel {:>3}: {:.3f}\n".format(x, scores[x]))
        # save model to drive
        utils.save_model(model, "{}model.pkl".format(output_folder))
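# Standard entry point so the benchmark script can be run directly; relies on
# the module-level imports above.
if __name__ == "__main__":
    main()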
def test_data_additional(self):
    self.assertTrue(len(self.data_pwm.meta) == 2)
    self.assertTrue(self.data_pwm.meta[0]['is_categorical'] == False)
    self.assertTrue(self.data_pwm.meta[1]['is_categorical'] == True)
    self.assertTrue(self.data_pwm.meta[0]['data'] == [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
        16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
    ])
    self.assertTrue(len(self.data_pwm.meta[1]['data']) == 32)
    for x in self.data_pwm.meta[1]['data']:
        self.assertTrue(sum(x) == 1)
    self.assertTrue((self.data_pwm.meta[1]['data'][0] ==
                     self.data_pwm.meta[1]['data'][31]).all())
    self.assertTrue((self.data_pwm.meta[1]['data'][13] ==
                     self.data_pwm.meta[1]['data'][18]).all())
    addi = self.data_pwm._get_additional_data([0, 1, 15, 16], 0, 4)
    self.assertTrue(len(addi) == 4)
    self.assertTrue(np.allclose(addi[0], [1, *self.data_pwm.meta[1]['data'][0]]))
    self.assertTrue(np.allclose(addi[1], [2, *self.data_pwm.meta[1]['data'][1]]))
    self.assertTrue(np.allclose(addi[2], [16, *self.data_pwm.meta[1]['data'][15]]))
    self.assertTrue(np.allclose(addi[3], [16, *self.data_pwm.meta[1]['data'][16]]))
    # check position-wise additional data
    self.assertTrue(len(self.data_pwm.positionwise) == 2)
    self.assertTrue(list(self.data_pwm.positionwise.keys()) == ["feat1", "feat2"])
    gen = self.data_pwm._data_generator("all", 32, False, False)
    dat = next(gen)
    self.assertTrue(dat[1].shape == (32, 17))
    self.assertTrue(dat[0].shape == (32, 10, 14))
    self.assertTrue(np.allclose(dat[0][0, :, 12],
                                [0.9, 0.8, 0.7, 0.9, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))
    self.assertTrue(np.allclose(dat[0][31, :, 13],
                                [0.1, 0.2, 0.3, 0.1, 1.0, 1.0, 0.8, 0.3, 0.2, 0.1]))
    self.assertTrue(np.allclose(dat[0][31, :, 12],
                                [2.1, 2.2, 2.3, 2.1, 2.0, 2.0, 2.8, 2.3, 2.2, 2.1]))
    mod = Model({"conv_num": 1, "kernel_num": 2, "kernel_len": 4,
                 "neuron_num": 2, "epochs": 2}, self.data_pwm)
    mod.train(self.data_pwm, verbose=True)
    predictions = mod.predict(self.data_pwm, "all")
    self.assertTrue(predictions.shape == (32, 2))
    # check kernel output plot for position-wise data
    folder = gettempdir() + '/'
    acts = mod.get_max_activations(self.data_pwm, 'all')
    motif, score = mod.visualize_kernel(acts, self.data_pwm, 0, folder)
    with Image.open(folder + "additional_features_kernel_0.png") as img:
        self.assertTrue(img.size == (500, 1400))
    remove(folder + "additional_features_kernel_0.png")
    remove(folder + "motif_kernel_0.png")
    remove(folder + "position_kernel_0.png")
    remove(folder + "activations_kernel_0.png")
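# Hedged sketch of the fixture such a test assumes: a Data object with one
# numeric and one categorical per-sequence feature plus two position-wise
# feature tracks ("feat1", "feat2"). The loader names and signatures below
# are assumptions inferred from the attributes exercised above (meta,
# positionwise), not verified API; the file names are placeholders.
from pysster.Data import Data

data_pwm = Data(["class0.fasta", "class1.fasta"], "ACGT")          # hypothetical files
data_pwm.load_additional_data("lengths.txt", is_categorical=False)  # -> meta[0]
data_pwm.load_additional_data("groups.txt", is_categorical=True)    # -> meta[1]
data_pwm.load_additional_positionwise_data("feat1.txt", "feat1")    # -> positionwise["feat1"]
data_pwm.load_additional_positionwise_data("feat2.txt", "feat2")    # -> positionwise["feat2"]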