def compare_model():
    """Instantiate every retrieval model, evaluate them all, and save a
    bar chart of each model's mean average precision.

    Side effects: writes 'plot/models_comparison_precision_eval.png'.
    Returns None.
    """
    index = Index()
    models = [
        Vectoriel(index, SimpleWeighter(index), normalized=True),
        Vectoriel(index, BasicWeighter(index), normalized=True),
        Vectoriel(index, ThirdWeighter(index), normalized=False),
        Vectoriel(index, FourthWeighter(index), normalized=False),
        Okapi(index),
        LanguageModel(index, .7),
        HITS(index, seeds=3, k=5),
        PageRank(index, seeds=3, k=5),
    ]
    names = ['Vectoriel1', 'Vectoriel2', 'Vectoriel3', 'Vectoriel4',
             'Okapi', 'Language', 'HITS', 'PageRank']
    models_scores = compare_models(names, models)

    plt.figure(figsize=(10, 8))
    # Fix: the original read from undefined names `scores` and `c`; the
    # evaluation result is bound to `models_scores`, and the bar colors
    # come from the colormap sampled once per model (the old `iter(...)`
    # was never consumed).
    colors = cm.hot(np.linspace(0, .7, len(models_scores)))
    # Bar heights are the mean average precision of each model; bars are
    # placed at integer positions labelled with the model names (the old
    # code wrongly used the AP means themselves as x-positions and the
    # raw score dicts as heights).
    heights = [s['AveragePrecision']['mean'] for s in models_scores.values()]
    positions = list(range(len(heights)))
    plt.xticks(positions, list(models_scores.keys()))
    plt.bar(positions, heights, alpha=.4, color=colors)
    plt.savefig('plot/models_comparison_precision_eval.png')
def __init__(self, index):
    """Build the featurer around a normalized Vectoriel model using
    SimpleWeighter, with an initially empty feature cache."""
    super(FeaturerVectoriel1, self).__init__(index)
    # Per-query feature cache, filled lazily.
    self.features = {}
    self.model = Vectoriel(index, SimpleWeighter(index), normalized=True)
def __init__(self, index):
    """Build the featurer around an unnormalized Vectoriel model using
    FourthWeighter, with an initially empty feature cache."""
    super(FeaturerVectoriel4, self).__init__(index)
    # Per-query feature cache, filled lazily.
    self.features = {}
    self.model = Vectoriel(index, FourthWeighter(index), normalized=False)