def evaluate(self, model, data, labels):
    """Rank candidates for every row of *data* and aggregate self.metrics.

    For each example, self.makerank produces a ranking over the candidate
    entity set; each metric is fed the gold label and that ranking, then
    queried once at the end for its aggregate value.

    Returns a dict mapping metric name -> aggregated value.
    """
    timer = TT("Evaluator")
    # Candidate entities: everything appearing in the first data column,
    # plus every gold label.
    candidates = set(data[:, 0]) | set(labels)
    total = data.shape[0]
    for idx in range(total):
        ranking = self.makerank(data[idx], model, candidates)
        timer.progress(idx, total)
        for m in self.metrics:
            m([labels[idx]], ranking)
    # Collect each metric's final aggregate under its name.
    scores = {m.name: m() for m in self.metrics}
    timer.tock("computed")
    return scores
def evaluate(self, model, data, labels):
    """Evaluate *model* on *data*, accumulating every metric in self.metrics.

    For each row of *data*, self.makerank produces a ranking over the
    candidate entity set; each metric is updated with the gold label and
    that ranking, and finally queried once for its aggregate value.

    Returns:
        dict mapping metric name -> aggregated metric value.
    """
    tt = TT("Evaluator")
    # Candidate entities: all values in the first data column plus all labels.
    entidxs = set(data[:, 0]).union(set(labels))
    for n in range(data.shape[0]):
        ranking = self.makerank(data[n], model, entidxs)
        tt.progress(n, data.shape[0])
        for metric in self.metrics:
            # Metric is updated per-example with the gold label and ranking.
            metric([labels[n]], ranking)
    results = {}
    for metric in self.metrics:
        # Calling a metric with no args yields its final aggregate.
        results[metric.name] = metric()
    tt.tock("computed")
    return results
def evaluate(data, model, *metrics):
    """Evaluate *model* on (s, r, o) triples, scoring each supplied metric.

    The triples are grouped by (subject, relation) so each query is ranked
    once against the full set of gold objects for that query.

    Args:
        data: array-like of (s, r, o) index triples (3 columns).
        model: scoring model, passed through to makerank.
        *metrics: callables with a ``.name`` attribute; updated per query
            and called once with no arguments for the final aggregate.

    Returns:
        dict mapping metric name -> aggregated metric value.
    """
    tt = TT("Evaluator")
    datadf = pd.DataFrame(data)
    datadf.columns = ["s", "r", "o"]
    # Collapse rows to one entry per (s, r) query, carrying the set of all
    # gold objects seen for that query.
    datadfgb = datadf.groupby(by=["s", "r"]).apply(
        lambda x: set(x["o"].unique())).reset_index()
    # Fix: removed dead `datadf.describe()` — its result was discarded, and
    # computing full summary statistics on a large frame is pure waste.
    # Candidate entity set: everything seen as a subject or an object.
    entidxs = set(datadf["s"].unique()).union(set(datadf["o"].unique()))
    results = {}
    tt.tock("initialized").tick()
    for n, row in datadfgb.iterrows():
        ranking = makerank(row, model, entidxs)
        tt.progress(n, datadfgb.shape[0])
        for metric in metrics:
            metric(row, ranking)
    for metric in metrics:
        # Calling a metric with no args yields its final aggregate.
        results[metric.name] = metric()
    tt.tock("computed")
    return results
def evaluate(data, model, *metrics):
    """Score *model* over (s, r, o) triples with the given metrics.

    Triples sharing a (subject, relation) pair are merged into a single
    query whose gold answers are the set of its objects; makerank ranks
    the candidate entities for each query.

    Args:
        data: array-like of (s, r, o) index triples (3 columns).
        model: scoring model handed to makerank.
        *metrics: callables with a ``.name`` attribute; fed each query's
            row and ranking, then called with no arguments to aggregate.

    Returns:
        dict mapping metric name -> aggregated metric value.
    """
    tt = TT("Evaluator")
    datadf = pd.DataFrame(data)
    datadf.columns = ["s", "r", "o"]
    # One row per (s, r) query, holding the set of all gold objects.
    datadfgb = datadf.groupby(
        by=["s", "r"]).apply(lambda x: set(x["o"].unique())).reset_index()
    # Fix: dropped the dead `datadf.describe()` call — its return value was
    # never used, so it only burned time on large inputs.
    # Candidates are all entities appearing as subject or object.
    entidxs = set(datadf["s"].unique()).union(set(datadf["o"].unique()))
    results = {}
    tt.tock("initialized").tick()
    for n, row in datadfgb.iterrows():
        ranking = makerank(row, model, entidxs)
        tt.progress(n, datadfgb.shape[0])
        for metric in metrics:
            metric(row, ranking)
    for metric in metrics:
        # A no-argument call returns the metric's final aggregate.
        results[metric.name] = metric()
    tt.tock("computed")
    return results