def score(self, table):
    # Prepare the input table and wrap it as an Experiment for scoring.
    prepared_table, __ = prepare_data_table(table, score_columns=self.score_columns)
    texp = Experiment(prepared_table)

    # Raw classifier score, normalized to a d-score with the stored
    # normalization parameters mu and nu.
    score = self.classifier.score(texp, True)
    texp["d_score"] = (score - self.mu) / self.nu

    # Map d-scores to s-values and q-values via the precomputed error table.
    s_values, q_values = lookup_s_and_q_values_from_error_table(
        texp["d_score"].values, self.error_stat.df)
    texp["m_score"] = q_values
    texp["s_value"] = s_values
    logging.info("mean m_score = %e, std_dev m_score = %e" %
                 (np.mean(q_values), np.std(q_values, ddof=1)))
    logging.info("mean s_value = %e, std_dev s_value = %e" %
                 (np.mean(s_values), np.std(s_values, ddof=1)))

    # Rank peak groups and join the results back onto the original table.
    texp.add_peak_group_rank()
    df = table.join(texp[["d_score", "m_score", "peak_group_rank"]])

    if CONFIG.get("compute.probabilities"):
        df = self.add_probabilities(df, texp)

    if CONFIG.get("target.compress_results"):
        # Drop the raw feature columns to keep the result table compact.
        to_drop = [n for n in df.columns
                   if n.startswith("var_") or n.startswith("main_")]
        df.drop(to_drop, axis=1, inplace=True)

    return df
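
# Hypothetical usage sketch (names are illustrative, not part of the API):
# given `scorer`, an already-trained instance of the class defining score()
# above, and `feature_table`, a pandas DataFrame of peak-group feature rows:
#
#     scored = scorer.score(feature_table)
#     top_hits = scored[scored["peak_group_rank"] == 1]
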
def enrich(self, input_table, experiment):
    # Look up s-values and q-values for the precomputed d-scores.
    s_values, q_values = lookup_s_and_q_values_from_error_table(
        experiment["d_score"], self.df)
    experiment["m_score"] = q_values
    experiment["s_value"] = s_values
    logging.info("mean m_score = %e, std_dev m_score = %e" %
                 (np.mean(q_values), np.std(q_values, ddof=1)))
    logging.info("mean s_value = %e, std_dev s_value = %e" %
                 (np.mean(s_values), np.std(s_values, ddof=1)))

    # Rank peak groups and join the scores back onto the input table.
    experiment.add_peak_group_rank()
    scored_table = input_table.join(
        experiment[["d_score", "m_score", "peak_group_rank"]])
    return scored_table

def enrich_table_with_results(self, table, experiment, df_raw_stat):
    # Same enrichment as enrich(), but with an explicitly supplied
    # raw error-statistics table instead of self.df.
    s_values, q_values = lookup_s_and_q_values_from_error_table(
        experiment["d_score"], df_raw_stat)
    experiment["m_score"] = q_values
    experiment["s_value"] = s_values
    logging.info("mean m_score = %e, std_dev m_score = %e" %
                 (np.mean(q_values), np.std(q_values, ddof=1)))
    logging.info("mean s_value = %e, std_dev s_value = %e" %
                 (np.mean(s_values), np.std(s_values, ddof=1)))

    # Rank peak groups and join the scores back onto the input table.
    experiment.add_peak_group_rank()
    scored_table = table.join(
        experiment[["d_score", "m_score", "peak_group_rank"]])
    return scored_table
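
# --- Illustrative sketch, not the library implementation -------------------
# lookup_s_and_q_values_from_error_table() is assumed to map each d-score to
# the q-value / s-value of the closest cutoff in a precomputed error table.
# A minimal stand-alone version of that idea, assuming the table holds an
# ascending "cutoff" column alongside "qvalue" and "svalue" columns
# (hypothetical column names), could look like this:

def lookup_s_and_q_values_sketch(d_scores, error_table):
    # Sort cutoffs ascending so np.searchsorted can bracket each d-score.
    order = np.argsort(error_table["cutoff"].values)
    cutoffs = error_table["cutoff"].values[order]
    q = error_table["qvalue"].values[order]
    s = error_table["svalue"].values[order]
    # For each d-score take the row of the nearest cutoff at or below it,
    # clipped to the table boundaries.
    idx = np.clip(np.searchsorted(cutoffs, d_scores, side="right") - 1,
                  0, len(cutoffs) - 1)
    return s[idx], q[idx]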