alpha = 0.3

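# Cache flags (semantics inferred from the usage below): USE_CACHE loads
# previously pickled correlation results from ./Plots/Data/ instead of
# recomputing them; DUMP_CACHE presumably re-pickles fresh results.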
USE_CACHE = False
DUMP_CACHE = False

dset = 'Valid'  # must be set before building `configurations` below

cv = CrossValidationManager('RET')
configurations = [
    '{}{}'.format(cv.get_run_id(i)[0],
                  cv.get_run_id(i)[1])
    for i in ([0, 1, 3, 4, 7] if dset == 'Valid' else [2, 5, 6, 8, 9])
]  # [range(10)]
# configurations = range(n_groups)
# configurations = [1]

runs, run_net_types, run_metrics, run_epochs, run_names, _, _ = load_experiments(
    experiment_name)
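# load_experiments returns seven per-run fields (run ids, network types,
# distance metrics, epoch lists, display names, ...); the last two are
# discarded here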

for m, metric_rating in enumerate(rating_metrics):

    Valid_epochs, Idx_malig_pearson, Idx_malig_kendall, Idx_rating_pearson, Idx_rating_kendall = [], [], [], [], []

    for run, net_type, dist, epochs, metric in zip(runs, run_net_types,
                                                   run_metrics, run_epochs,
                                                   run_metrics):
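        # NOTE: run_metrics is zipped twice above, so `dist` and `metric`
        # receive the same value on every iteration
        # per-run correlation results are pickled under ./Plots/Data/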
        plot_data_filename = './Plots/Data/correlation_{}{}.p'.format(
            net_type, run)
        try:
            if not USE_CACHE:
                print('NOTE: SKIPPING TO EVALUATION')
                # deliberately fail so the except branch below recomputes
                # the correlations instead of loading the cache
                assert False
            valid_epochs, idx_malig_pearson, idx_malig_kendall, idx_rating_pearson, idx_rating_kendall = \
Example #2

# Assumed imports for this example: numpy for the result arrays and
# timeit's default_timer as the `timer` used below; load_experiments,
# eval_classification and CrossValidationManager come from the
# surrounding project
import numpy as np
from timeit import default_timer as timer

if __name__ == "__main__":

    # Setup
    exp_name = 'DirSimilarityLoss'
    dset = 'Test'
    rating_norm = 'none'
    n_groups = 5
    start = timer()
    num_of_indexes = 3 + 4
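    # the explicit 3 + 4 presumably documents two groups of evaluation
    # indexes that fill the result arrays below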

    # cv = CrossValidationManager('RET')
    # configurations = ['{}{}'.format(cv.get_run_id(i)[0], cv.get_run_id(i)[1]) for i in [0, 1, 3, 4, 7]]  # [range(10)]
    configurations = range(n_groups)
    # configurations = [1]

    runs, run_net_types, run_metrics, _, run_names, run_ep_perf, run_ep_comb = load_experiments(
        exp_name)

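    # one row per run, one column per evaluation index; dataStd holds the
    # matching standard deviations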
    data = np.zeros((len(runs), num_of_indexes))
    dataStd = np.zeros((len(runs), num_of_indexes))

    # evaluate
    run_id = 0
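    # the range(len(runs)) index in the zip below is discarded into `_`;
    # run_id is tracked manually instead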
    for run, net_type, _, metric, epochs in zip(runs, run_net_types,
                                                range(len(runs)), run_metrics,
                                                run_ep_perf):
        print("Evaluating classification accuracy for {}{}".format(
            net_type, run))
        acc, acc_std, _ = eval_classification(run=run,
                                              net_type=net_type,
                                              dset=dset,
                                              metric=metric,
Example #3

# Assumed imports for this example: numpy and matplotlib's pyplot;
# `index`, `Ret`, `metrics`, `e` and load_experiments come from the
# surrounding project
import numpy as np
import matplotlib.pyplot as plt

def plot_row(i, distances, label=None):
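    """Plot one analysis row: a histogram of normalized distances, a PCA
    projection of the embedding, and the Kumari tau curve; i, i + 1 and
    i + 2 index three adjacent axes in the global subplot list `p`."""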
    tau, l_e = index.kumar(distances, res=0.01)
    conc = index.concentration(distances)
    contrast = index.relative_contrast_imp(distances)

    p[i].hist(distances.flatten() / np.mean(distances), bins=20)
    p[i].axes.title.set_text('std:{:.2f}, conc:{:.2f}, ctrst:{:.2f}'.format(
        conc[2], conc[0], contrast[0]))
    p[i].axes.yaxis.label.set_text('distribution')
    p[i].axes.xaxis.label.set_text('distances')
    Ret.pca(epoch=e, plt_=p[i + 1], label=label)
    p[i + 2].plot(l_e[1], l_e[0])
    p[i + 2].axes.title.set_text('Kumari (tau = {:.2f})'.format(tau))


runs, run_net_types, run_metrics, run_epochs, run_names, _, _ = load_experiments(
    'SiamRating')

# evaluate

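# per-run accumulators for the epochs and the distance-analysis indexes
# (hubness, symmetry, concentration, contrast, Kumari)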
Epochs, Idx_hubness, Idx_symmetry, Idx_concentration, Idx_contrast, Idx_kummar = [], [], [], [], [], []

for m, metric in enumerate(metrics):
    print("Begin: {} metric".format(metric))
    for run, net_type, r, epochs, name in zip(runs, run_net_types,
                                              range(len(runs)), run_epochs,
                                              run_names):
        print("Evaluating run {}{}".format(net_type, run))
        # initialize figures
        plt.figure("Distances - {}".format(name))
        p = [None] * 9
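        # 9 axes: three rows of (histogram, PCA, Kumari-curve) panels,
        # matching plot_row's i, i + 1, i + 2 layout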
        for i in range(9):