import os
import argparse

import numpy as np
from sklearn.metrics import confusion_matrix

# default_aggregation_func, mean_class_accuracy and softmax are expected to
# come from the TSN-style pyActionRecog utilities; minimal reference sketches
# are given below.


def eval_scores(score_files, score_weights, agg_method):
    """Fuse the score files of different models.

    Args:
        score_files (list(str)): file names of the score files.
        score_weights (list(float)): weights of the score files.
        agg_method (str): name of the numpy function used to aggregate the
            segment-level scores (e.g. 'mean' or 'max'), since the currently
            stored scores are segment-level scores. See test_models.py.

    Returns:
        tuple: the fused mean class accuracy (float) and the
            softmax-normalized video-level scores (list).
    """
    score_npz_files = [np.load(x) for x in score_files]

    if score_weights is None:
        score_weights = [1] * len(score_npz_files)
    elif len(score_weights) != len(score_npz_files):
        raise ValueError(
            "Only {} weights specified for a total of {} score files".format(
                len(score_weights), len(score_npz_files)))

    # x['scores'] has two columns: [segment-level score, label]
    score_list = [x['scores'][:, 0] for x in score_npz_files]
    label_list = [x['labels'] for x in score_npz_files]

    # label verification

    # score aggregation: collapse segment-level scores to video-level scores
    agg_score_list = []
    for score_vec in score_list:
        agg_score_vec = [
            default_aggregation_func(x, normalization=False,
                                     crop_agg=getattr(np, agg_method))
            for x in score_vec
        ]
        agg_score_list.append(np.array(agg_score_vec))

    # weighted fusion of the per-model video-level scores
    final_scores = np.zeros_like(agg_score_list[0])
    for i, agg_score in enumerate(agg_score_list):
        final_scores += agg_score * score_weights[i]

    # accuracy
    acc = mean_class_accuracy(final_scores, label_list[0])

    # softmax score
    softmax_scores = [softmax(vec) for vec in final_scores]

    return acc, softmax_scores
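# The helpers used above are assumed to follow the TSN-style utilities; the
# sketches below are minimal reference versions of what they likely do, not
# the exact originals. `softmax` is the standard numerically stable softmax;
# `default_aggregation_func` collapses a (segment, crop, class) score array
# to a single class-score vector.


def softmax(raw_score, T=1):
    # subtract the max before exponentiating for numerical stability
    exp_s = np.exp((raw_score - raw_score.max(axis=-1, keepdims=True)) / T)
    return exp_s / exp_s.sum(axis=-1, keepdims=True)


def default_aggregation_func(score_arr, normalization=True, crop_agg=None):
    # aggregate over crops (axis 1), then average over segments (axis 0)
    crop_agg = np.mean if crop_agg is None else crop_agg
    video_score = crop_agg(score_arr, axis=1).mean(axis=0)
    return softmax(video_score) if normalization else video_score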
def get_score_11111(score_files, xxxx=0.4):
    """Fuse two score files with weights (xxxx, 1 - xxxx) and return accuracy."""
    crop_agg = "mean"
    score_npz_files = [np.load(x) for x in score_files]
    score_list = [x['scores'][:, 0] for x in score_npz_files]
    label_list = [x['labels'] for x in score_npz_files]

    # aggregate segment-level scores to video-level scores
    agg_score_list = []
    for score_vec in score_list:
        agg_score_vec = [
            default_aggregation_func(x, normalization=False,
                                     crop_agg=getattr(np, crop_agg))
            for x in score_vec
        ]
        agg_score_list.append(np.array(agg_score_vec))

    # split id parsed from the file name (assumes a '<x>_<y>_<split>_...' pattern)
    split = score_files[0].split("_")[2]

    # weight the first (RGB) stream by xxxx and the second (flow) by 1 - xxxx
    score_weights = [xxxx, 1.0 - xxxx]
    if len(score_weights) != len(score_npz_files):
        raise ValueError(
            "Only {} weights specified for a total of {} score files".format(
                len(score_weights), len(score_npz_files)))

    final_scores = np.zeros_like(agg_score_list[0])
    for i, agg_score in enumerate(agg_score_list):
        final_scores += agg_score * score_weights[i]

    print("split:", split)

    # unwrap the nested per-video score arrays before computing accuracy
    ff = [x[0][0] for x in final_scores]
    acc = mean_class_accuracy(ff, label_list[0])
    return acc
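# Hypothetical usage sketch for get_score_11111: sweep the RGB weight `xxxx`
# over a grid and keep the fusion weight with the best accuracy. The helper
# name and the grid are illustrative assumptions, not part of the original.
def sweep_rgb_weight(score_files, weights=np.linspace(0.0, 1.0, 21)):
    # evaluate every candidate weight and return the best (weight, accuracy)
    results = [(w, get_score_11111(score_files, xxxx=w)) for w in weights]
    return max(results, key=lambda t: t[1])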
video_pred = [np.argmax(x) for x in final_scores]
video_labels = label_list[0]

cf = confusion_matrix(video_labels, video_pred).astype(float)
print(cf)

# accuracy for each class: per-class hits over per-class counts
cls_cnt = cf.sum(axis=1)
cls_hit = np.diag(cf)
cls_acc = cls_hit / cls_cnt
# normalize each row of the confusion matrix by its class count
cf_acc = cf / cls_cnt[:, np.newaxis]
print(cls_acc)

# accuracy
acc = mean_class_accuracy(final_scores, label_list[0])
print('Final accuracy {:.2f}%'.format(acc * 100))

# save results when fusing exactly two score files
if len(score_npz_files) == 2:
    file_path = os.path.dirname(args.score_files[0])
    file_name = 'com' + os.path.splitext(
        args.score_files[0])[0].split('_split')[1] + '.txt'
    with open(os.path.join(file_path, file_name), 'w') as f:
        f.write('Confusion Matrix\n')
        f.write('%s\n' % cf)
        f.write('Confusion Matrix with accuracy\n')
        f.write('%s\n' % cf_acc)
        f.write('\nClass accuracy = \n%s\n' % cls_acc)
        f.write('\nFinal accuracy {:.2f}%'.format(acc * 100))
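# `mean_class_accuracy` is assumed to be the TSN-style metric: the mean of
# the per-class recalls (the diagonal of the row-normalized confusion
# matrix), which matches the cls_acc computation above. A minimal sketch
# under that assumption (suffixed to avoid shadowing the real helper):
def mean_class_accuracy_sketch(pred, labels):
    # pred: (num_videos, num_class) score matrix; labels: (num_videos,)
    cf = confusion_matrix(labels, np.argmax(pred, axis=1)).astype(float)
    cls_cnt = cf.sum(axis=1)
    cls_hit = np.diag(cf)
    return np.mean(cls_hit / cls_cnt)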
parser.add_argument('--score_weights', nargs='+', type=float, default=None)
args = parser.parse_args()

score_npz_files = [np.load(x) for x in args.score_files]

if args.score_weights is None:
    score_weights = [1] * len(score_npz_files)
else:
    score_weights = args.score_weights
    if len(score_weights) != len(score_npz_files):
        raise ValueError(
            "Only {} weights specified for a total of {} score files".format(
                len(score_weights), len(score_npz_files)))

score_list = [x['scores'][:, 0] for x in score_npz_files]
label_list = [x['labels'] for x in score_npz_files]

# label verification

# score aggregation: default crop aggregation (mean), no softmax normalization
agg_score_list = []
for score_vec in score_list:
    agg_score_vec = [default_aggregation_func(x, normalization=False)
                     for x in score_vec]
    agg_score_list.append(np.array(agg_score_vec))

# weighted fusion
final_scores = np.zeros_like(agg_score_list[0])
for i, agg_score in enumerate(agg_score_list):
    final_scores += agg_score * score_weights[i]

# accuracy
acc = mean_class_accuracy(final_scores, label_list[0])
print('Final accuracy {:.2f}%'.format(acc * 100))
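# Example invocation (illustrative; the script and .npz names are
# placeholders for the per-stream score files produced by test_models.py):
#
#   python eval_scores.py rgb_split_1_scores.npz flow_split_1_scores.npz \
#       --score_weights 1.0 1.5
#
# With no --score_weights, every stream gets weight 1.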
if len(score_weights) != len(score_npz_files):
    raise ValueError(
        "Only {} weights specified for a total of {} score files".format(
            len(score_weights), len(score_npz_files)))

final_scores = np.zeros_like(agg_score_list[0])
for i, agg_score in enumerate(agg_score_list):
    final_scores += agg_score * score_weights[i]

print("split:", split)

# accuracy: unwrap the nested per-video score arrays first
ff = [x[0][0] for x in final_scores]
acc, class_acc = mean_class_accuracy(ff, label_list[0])
print('Final accuracy {:.2f}%'.format(acc * 100))
print("rgb_score_weight:", xxxx)
print()

# MIFS fusion with our method: reuse the weighted fusion above with a fixed
# RGB weight, e.g. score_weights = [0.4, 0.6].
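# Tiny numeric check of the weighted fusion above (toy values, assumed for
# illustration only): two models' video-level scores fused with weights
# 0.4 / 0.6, matching the MIFS setting mentioned above.
rgb = np.array([[2.0, 1.0], [0.5, 3.0]])
flow = np.array([[1.0, 2.0], [1.5, 1.0]])
fused = 0.4 * rgb + 0.6 * flow   # -> [[1.4, 1.6], [1.1, 1.8]]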