# Baseline: score each SumMe video by the mean of its annotators' frame
# scores, convert that curve to a segment-constrained 0/1 keyshot summary,
# and report the F1 score averaged over all individual annotators.
t_acc = 0

for video_idx, s_filename in enumerate(video_file_stem_names):
    # Features are not needed here; only the per-user frame labels
    # (user_labels has shape (n_frames, n_users)).
    _, user_labels, feature_sizes = SumMeMultiViewFeatureLoader.load_by_name(s_filename, doSoftmax=False)
    s_seg = pdefined_segs[s_filename]
    # Collapse the annotators into a single soft score curve.
    avg_labels = np.mean(user_labels, axis=1)

    # Convert frame scores to a binary selection constrained by the
    # predefined shot segments.
    s_frame01scores = rep_conversions.framescore2frame01score(avg_labels, s_seg)
    user_scores_list = [user_labels[:, i] for i in range(user_labels.shape[1])]
    s_F1_score = metrics.averaged_F1_score(y_trues=user_scores_list, y_score=s_frame01scores.tolist())

    # Single-argument print(...) is valid in both Python 2 and Python 3.
    print("[{:02d} | {:02d}]\t{:s}: \t:{:.04f}, n_frames:{:d}".format(video_idx, len(video_file_stem_names), s_filename, s_F1_score, user_labels.shape[0]))
    t_acc += s_F1_score
print("Total MinAcc: {:.04f}".format(t_acc / len(video_file_stem_names)))



# Exploration/debug script: cross-check the TVSum ground-truth annotations
# and shot boundaries against the KY (AAAI'18 / ECCV'16) HDF5 dataset.
from vsSummDevs.SumEvaluation import rep_conversions

dataset_name = 'tvsum'
# Per-video user annotations keyed by the TVSum video stem name.
tvsum_gt = LoadLabels.load_annotations()
# Predefined shot boundaries from the ECCV 2016 release.
tv_segments = getShotBoundariesECCV2016.getTVSumShotBoundaris()

KY_dataset_path = os.path.join(os.path.expanduser('~'),
                               'datasets/KY_AAAI18/datasets')
h5f_path = os.path.join(
    KY_dataset_path,
    'eccv16_dataset_{:s}_google_pool5.h5'.format(dataset_name))
dataset = h5py.File(h5f_path, 'r')
dataset_keys = dataset.keys()
n_videos = len(dataset_keys)

# Mapping from a TVSum video stem to its 0-based index in the KY dataset.
index_dict = getShotBoundariesECCV2016.getTVSumCorrespondecesKZ()

for s_videostem in tvsum_gt.keys():
    user_annotations = tvsum_gt[s_videostem]
    s_segments = tv_segments[s_videostem]
    # key = dataset_keys[index_dict[s_videostem]]
    # KY dataset keys are 1-based ('video_1', ...), hence the +1.
    key = 'video_{:d}'.format(index_dict[s_videostem] + 1)
    # Only the first annotator's scores are converted here — presumably a
    # spot check; NOTE(review): confirm whether all users were intended.
    s_user_score = user_annotations['video_user_scores'][:, 0]
    s_user_score01 = rep_conversions.framescore2frame01score(
        s_user_score.tolist(), s_segments.tolist())
    # The values below are computed but never used; they appear to exist
    # only so a debugger stopped at the print can inspect them.
    ky_user_summary = dataset[key]['user_summary'][...]
    ky_user_summary0 = ky_user_summary[0]
    cps = dataset[key]['change_points'][...]
    print "DEBUG"

print "DEBUG"
# Exemple #3
# (stray paste-separator text commented out: the bare names `Exemple` / `0`
#  would otherwise raise a NameError / be a meaningless statement)
# Unsupervised PCA baseline: use each frame's summed contribution to the
# principal components (PCA fit across the frame axis) as its importance
# score, then evaluate against the SumMe annotators.
F1_scores = 0
for video_idx, s_filename in enumerate(videofile_stems):

    video_features, user_labels, _ = SumMeMultiViewFeatureLoader.load_by_name(
        s_filename, doSoftmax=False)

    # BUG FIX: sklearn.preprocessing.normalize returns a new array; the
    # original discarded the result, so the features were never normalized.
    video_features = sklearn.preprocessing.normalize(video_features)
    pca = PCA(whiten=True, svd_solver='auto')
    # Fit on the transpose so the components span the frame axis.
    pca.fit(video_features.transpose())
    frame_contrib = np.sum(pca.components_, axis=0)

    # BUG FIX: min-max normalization — the denominator must be max - min;
    # the original divided by (max - mean), which does not map to [0, 1].
    frame_contrib = (frame_contrib - np.min(frame_contrib)) / (
        np.max(frame_contrib) - np.min(frame_contrib))
    s_seg = pdefined_segs[s_filename]
    s_frame01scores = rep_conversions.framescore2frame01score(
        frame_contrib, s_seg)

    user_scores_list = [user_labels[:, i] for i in range(user_labels.shape[1])]
    s_F1_score = metrics.max_F1_score(y_trues=user_scores_list,
                                      y_score=s_frame01scores.tolist())
    avg_labels = np.mean(user_labels, axis=1)

    # Pearson correlation between the PCA scores and the annotator average.
    s_scorr, s_p = pearsonr(frame_contrib, avg_labels)
    print("[{:d} | {:d}] \t {:s} \t{:.04f}\t correlation:{:.04f}".format(
        video_idx, len(videofile_stems), s_filename, s_F1_score, s_scorr))
    F1_scores += s_F1_score

print("overall F1 score: {:.04f}".format(F1_scores / len(videofile_stems)))
# Unsupervised entropy baseline: score frames by the entropy of their
# (softmaxed) multi-view features, then evaluate against the annotators.
# BUG FIX: totalF1 was accumulated below but never initialized in this
# fragment; start the accumulator at zero so `totalF1 +=` is well-defined.
totalF1 = 0
for video_idx, s_filename in enumerate(video_file_stem_names):

    s_segments = pdefined_segs[s_filename].tolist()
    video_features, user_labels, feature_sizes = SumMeMultiViewFeatureLoader.load_by_name(
        s_filename, doSoftmax=True)

    # Per-frame entropy over the multi-view feature groups.
    frame_entropy = SumMeMultiViewFeatureLoader.feature_entropy(
        video_features, feature_sizes)

    # Min-max normalize the entropy curve to [0, 1].
    frame_entropy = (frame_entropy - np.min(frame_entropy)) / (
        np.max(frame_entropy) - np.min(frame_entropy))

    s_frame01scores = rep_conversions.framescore2frame01score(
        frame_entropy, s_segments)
    user_scores_list = [user_labels[:, i] for i in range(user_labels.shape[1])]
    s_F1_score = metrics.averaged_F1_score(y_trues=user_scores_list,
                                           y_score=s_frame01scores.tolist())
    print("[{:d} | {:d}] \t {:s} \t{:.04f}".format(video_idx,
                                                   len(video_file_stem_names),
                                                   s_filename, s_F1_score))
    totalF1 += s_F1_score

print("overall F1 score: {:.04f}".format(totalF1 / len(video_file_stem_names)))

# print "DEBUG"
# Exemple #5
# (stray paste-separator text commented out; see note at Exemple #3)
        # NOTE(review): this fragment is the interior of an outer loop whose
        # header — and the definitions of video_features, reg_labels, C,
        # percentage, t_acc_sort, t_acc_seg — lies outside this chunk, so the
        # code is left untouched.
        s_seg = pdefined_segs[s_filename]
        # Train on the first `percentage` fraction of frames only.
        ntrain = int(percentage*video_features.shape[0])

        selected_train_features = video_features[:ntrain, :]
        selected_train_labels = reg_labels[:ntrain]
        # val_features = video_features[ntrain:,:]
        # val_labels = reg_labels[ntrain:]

        # clc_labels = np.max(user_labels, axis=1)
        # Linear SVR fit on the training prefix, then used to predict frame
        # scores for the whole video.
        clf = svm.LinearSVR(C=C)
        clf.fit(selected_train_features, selected_train_labels)
        predicted_video_labels = clf.predict(video_features)
        # Conversion #1: rank-based (sorted-score) keyshot selection.
        s_frame01scores = rep_conversions.framescore2frame01score_sort(predicted_video_labels)
        user_scores_list = [user_labels[:, i] for i in range(user_labels.shape[1])]
        s_scorr, s_p = pearsonr(predicted_video_labels, reg_labels)

        s_F1_score_sort = metrics.averaged_F1_score(y_trues=user_scores_list, y_score=s_frame01scores.tolist())
        t_acc_sort += s_F1_score_sort

        # Conversion #2: segment-constrained keyshot selection.
        s_frame01scores = rep_conversions.framescore2frame01score(predicted_video_labels, s_seg)
        s_F1_score_seg = metrics.averaged_F1_score(y_trues=user_scores_list, y_score=s_frame01scores.tolist())

        t_acc_seg += s_F1_score_seg

        print "[{:d} | {:d}] \t {:s} \t{:.04f}\t correlation:{:.04f}\t{:.04f}".format(video_idx, len(video_file_stem_names),
                                                                                      s_filename, s_F1_score_sort, s_scorr, s_F1_score_seg)
    print "Total Acc: {:.04f}\t{:.04f}".format(t_acc_sort / len(video_file_stem_names), t_acc_seg/(len(video_file_stem_names)))



# Sanity check: evaluate the KY vsum_tools knapsack summary generated from
# the averaged annotator labels (max-F1 protocol) on SumMe.
t_acc = 0
for video_idx, s_filename in enumerate(video_file_stem_names):
    _, user_labels, feature_sizes = SumMeMultiViewFeatureLoader.load_by_name(
        s_filename, doSoftmax=False)
    s_seg = pdefined_segs[s_filename]
    avg_labels = np.mean(user_labels, axis=1)
    nframes = user_labels.shape[0]
    # Segment boundaries -> [start, end] intervals expected by vsum_tools.
    intervals = np.asarray(rep_conversions.convert_seg2interval(s_seg, nframes))
    # Frames-per-segment counts from consecutive boundary differences.
    nfps = [s_seg[i] - s_seg[i - 1] for i in range(1, len(s_seg))]

    generated_summary = vsum_tools.generate_summary(avg_labels,
                                                    intervals,
                                                    nframes,
                                                    nfps,
                                                    positions=np.asarray(
                                                        range(nframes)))
    # (A dead framescore2frame01score conversion of avg_labels was removed
    # here; its result was never used.)

    s_F1_score, _, _ = vsum_tools.evaluate_summary(generated_summary,
                                                   user_labels.transpose(),
                                                   eval_metric='max')

    t_acc += s_F1_score
print("Total MinAcc: {:.04f}".format(t_acc / len(video_file_stem_names)))
# Exemple #7
# (stray paste-separator text commented out; see note at Exemple #3)
# Oracle baseline: for each SumMe video, evaluate every single annotator's
# labels against all annotators and report the per-video min/max F1.
sample_rate = 5  # NOTE(review): unused in this fragment — confirm before removing.
pdefined_segs = getSegs.getSumMeShotBoundaris()
video_file_stem_names = dataset_pathvars.file_names
video_file_stem_names.sort()

t_acc_min = 0
t_acc_max = 0

for video_idx, s_filename in enumerate(video_file_stem_names):
    _, user_labels, feature_sizes = SumMeMultiViewFeatureLoader.load_by_name(
        s_filename, doSoftmax=False)
    s_seg = pdefined_segs[s_filename]
    # PERF FIX: user_scores_list does not depend on user_idx, so build it
    # once per video (the original rebuilt it inside the annotator loop).
    user_scores_list = [
        user_labels[:, i] for i in range(user_labels.shape[1])
    ]
    s_F1_scores = []
    for user_idx in range(user_labels.shape[1]):
        # Treat this single annotator's labels as the predicted scores.
        selected_labels = user_labels[:, user_idx]
        s_frame01scores = rep_conversions.framescore2frame01score(
            selected_labels, s_seg)
        s_F1_score = metrics.averaged_F1_score(
            y_trues=user_scores_list, y_score=s_frame01scores.tolist())
        s_F1_scores.append(s_F1_score)

    print("[{:02d} | {:02d}]\t{:s}: \tMin:{:.04f}\tMax:{:.04f}".format(
        video_idx, len(video_file_stem_names), s_filename, min(s_F1_scores),
        max(s_F1_scores)))
    t_acc_min += min(s_F1_scores)
    t_acc_max += max(s_F1_scores)
print("Total MinAcc: {:.04f}\t MaxAcc{:.04f}".format(
    t_acc_min / len(video_file_stem_names),
    t_acc_max / len(video_file_stem_names)))