def run_epoch(sess, model, pred_op, train_op, is_valid):
    dataset_tr = model.dataset.get_data(is_valid)
    dataset_tr.reset()
    model.reset()
    # Iterate over full batches only; any remainder is dropped.
    for i in range(len(dataset_tr.students) // model.dataset.batch_size):
        model.run_batch(sess, pred_op, train_op, is_valid)
    return score(dataset_tr.labels, model.predictions)
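# A minimal usage sketch for the function above, assuming a TensorFlow 1.x-style
# session (the `sess` parameter suggests one). `num_epochs`, the printed metric
# name, and the assumption that `score` returns a single scalar are all
# hypothetical, not part of the original snippet.
import tensorflow as tf

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(num_epochs):  # model, pred_op, train_op built elsewhere
        train_score = run_epoch(sess, model, pred_op, train_op, is_valid=False)
        valid_score = run_epoch(sess, model, pred_op, train_op, is_valid=True)
        print('epoch %d: train=%.4f valid=%.4f' % (epoch, train_score, valid_score))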
def evaluator(prediction, gold, tag_scheme='BIES', verbose=False):
    assert len(prediction) == len(gold)
    scores = score(gold[0], prediction[0], verbose)
    print('Segmentation F-score: %f' % scores[0])
    # Tagging scores only apply when the scheme does more than segmentation.
    if tag_scheme != 'seg':
        print('Tagging F-score: %f' % scores[1])
    scores = [scores]
    return scores
def evaluator(prediction, gold, metric='F1-score', tag_num=1, verbose=False):
    assert len(prediction) == len(gold)
    scores = (0, 0, 0, 0, 0, 0)
    scores_b = (0, 0, 0, 0, 0, 0)
    if metric in ['F1-score', 'Precision', 'Recall', 'All']:
        scores = score(gold[0], prediction[0], tag_num, verbose)
    if metric in ['Boundary-F1-score', 'All']:
        scores_b = score_boundaries(gold[0], prediction[0], verbose)
    # Tuple concatenation: tagging scores first, boundary scores appended after.
    return scores + scores_b
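# A hedged call example for the evaluator above. The tag sequences and
# tag_num value are made up for illustration, and it assumes `score` and
# `score_boundaries` each return a 6-tuple, as the zero defaults suggest.
gold = [['B', 'E', 'S', 'B', 'I', 'E']]
pred = [['B', 'E', 'B', 'E', 'I', 'E']]
all_scores = evaluator(pred, gold, metric='All', tag_num=2, verbose=True)
seg_scores, boundary_scores = all_scores[:6], all_scores[6:]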
def compete(self, x, y, live_ratio=0.5):
    model_score = {
        m: evaluation.score(m, x, y, score_function=self.score_function)
        for m in self.models
    }
    self.score_history.append(model_score)
    # Rank models from best to worst score.
    score_ranking = sorted(model_score.keys(),
                           key=lambda model: model_score[model],
                           reverse=True)
    from utilities import partition_indices
    start_i, keep_i, kill_i = partition_indices(score_ranking, live_ratio)
    # Replace the models slated for removal with freshly initialised ones.
    self.models[keep_i:kill_i] = self.new_models(kill_i - keep_i)
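# `partition_indices` lives in a project-local `utilities` module that is not
# shown here. The sketch below is an assumption about its behaviour, inferred
# only from how its three return values are used above: models in
# [start_i:keep_i] survive and the slice [keep_i:kill_i] is replaced.
def partition_indices(ranking, live_ratio):
    """Hypothetical helper: split a ranked list at the survival cut-off."""
    keep = int(len(ranking) * live_ratio)  # number of models that survive
    return 0, keep, len(ranking)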
cg_result = []
lg_result = []
result = []           # appended to below but missing from the original snippet
weighted_result = []  # likewise missing from the original snippet
valid_trips = []
sp = []
wsp = []
lsp = []
cp = []
lp = []
for i in range(len(test)):
    t = test[i]
    if len(t) > 1:
        # `and` instead of bitwise `&` for boolean conditions
        if G.has_node(t[0]) and G.has_node(t[-1]):
            try:
                # Baseline graph
                Shortest_path = nx.dijkstra_path(G, t[0], t[-1])
                sp.append(Shortest_path)
                s = score(t, Shortest_path)
                result.append(s)
                # Weighted graph
                shortest_path = nx.dijkstra_path(WG, t[0], t[-1])
                wsp.append(shortest_path)
                wScore = score(t, shortest_path)
                weighted_result.append(wScore)
                # C graph
                Csp = nx.dijkstra_path(CG, t[0], t[-1])
                cp.append(Csp)
                cScore = score(t, Csp)
                cg_result.append(cScore)
                # L graph
                Lsp = nx.dijkstra_path(LG, t[0], t[-1])
                lp.append(Lsp)
                lScore = score(t, Lsp)
                lg_result.append(lScore)  # assumed: follows the pattern of the other graphs
            except nx.NetworkXNoPath:
                # Assumed handler: the original try block is cut off before its except clause.
                continue
def evaluation(self):
    # Score each individual and accumulate the population total.
    self.score = 0.0
    for i in self.individuals:
        i.score = evaluation.score(evaluation.permute(self.cipher, i.sequence))
        self.score += i.score
print_summary = '%s (%d %d%%) %.4f' % (time_since(start, epoch / EPOCH),
                                       epoch, epoch / EPOCH * 100,
                                       print_loss_avg)
print(print_summary)
del print_loss_total
torch.save(net, "net_attn.pkl")
# evaluate_all('result_label.txt', maxlength, lang, label, net)
ground, predict = evaluate_all2('result_label.txt', data_test, maxlength_test,
                                lang, label, net)
'''
f = open('result_label.txt', "r")
lines = f.readlines()
label_list = []
prediction_list = []
for line in lines:
    label_list.append(line.split(" ")[1])
    prediction_list.append(line.split(" ")[3].rstrip())
'''
# Map predicted indices back to label strings before scoring.
predict = [label.index2label[i] for i in predict]
rep, res = score(ground, predict)
# Write the result of this epoch to result_matrix.txt.
result_matrix.writelines(rep)
result_matrix.writelines("\n")
result_matrix.writelines(str(res))
# result_matrix.close()
# result_matrix.writelines("\n")
print("evaluation result:")
print(rep)
print(res)
del rep, res, predict, ground
@author: splin
"""
import random, sys
from evaluation import score, evaluate

# Rock, paper, scissors game
print('ROCK, PAPER, SCISSORS')
wins = 0
losses = 0
ties = 0
moves = {'r': 'ROCK', 'p': 'PAPER', 's': 'SCISSORS'}
# random.choices returns a single-element list by default, so .pop()
# unwraps the generated value.
while True:
    score(wins, losses, ties)
    while True:
        # Drawing directly from moves.values() avoids keeping a separate
        # list of the possible moves.
        value = random.choices([v for v in moves.values()]).pop()
        print("Enter your move: (r)ock, (p)aper, (s)cissors or (q)uit")
        move = input()
        if move == 'q':
            sys.exit()
        elif move in ['p', 'r', 's', 'q']:
            break
        print('Type one of r, p, s, or q.')
selection = active_items[(active_items.industry_id == user.industry_id) &
                         (active_items.region == user.region)]
if selection.shape[0] > 100:
    selection = selection.sample(100)
item_scores.loc[selection.index] += 1
selection = active_items[active_items.career_level == user.career_level]
item_scores = item_scores.loc[selection.index]
res.append(list(item_scores.sort_values(ascending=False)[:30].index))

df = pd.DataFrame({'recommended': res}, index=users_val.index)

# Scoring
intersect = np.array([
    len(set(a).intersection(b))
    for a, b in zip(df.recommended, target_val.relevant)
])
print('Number of intersections = {}'.format(intersect.sum()))
s = score(df, target_val)
print('Score = {}'.format(s))
print('Leaderboard score = {}'.format(s / users_val.shape[0] * 50000))
print('Full score = {}'.format(s / users_val.shape[0] * 150000))

# Save result: plain lists have no .astype, so stringify element-wise.
df.recommended = df.recommended.apply(lambda a: ','.join(str(i) for i in a))
df.to_csv('../own_data/baseline_val.csv', sep='\t')
def score(self, X, y=None):
    reconstructed = self.transform(X)
    # TODO: connect to evaluation script
    return evaluation.score(X, reconstructed)
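# The method above follows the scikit-learn estimator convention, where a
# `score(X, y=None)` method lets an unsupervised model plug into model
# selection utilities. A sketch under that assumption; `AutoencoderModel`
# and `X` are hypothetical, and the class is assumed to also implement fit().
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(AutoencoderModel(), X, cv=3)
print(cv_scores.mean())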