Example #1
def perf_OffTheshelf_InceptionV1_IconArt_ArtUK_baseline():
        
    print('IconArt - InceptionV1')
    learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='MLP1',features='avgpool',\
                constrNet='InceptionV1',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_big0001_modif_adam_unfreeze50_RandForUnfreezed_SmallDataAug_ep200',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer=None,
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)  
    learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='MLP1',features='avgpool',\
                constrNet='InceptionV1',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_big001_modif_RandInit_randomCrop_deepSupervision_ep200_LRschedG',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer=None,
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)  
        
    print('Paintings - InceptionV1')
    learn_and_eval('Paintings',source_dataset='ImageNet',final_clf='MLP1',features='avgpool',\
                constrNet='InceptionV1',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_big0001_modif_adam_unfreeze50_RandForUnfreezed_SmallDataAug_ep200',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer=None,
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)  
    learn_and_eval('Paintings',source_dataset='ImageNet',final_clf='MLP1',features='avgpool',\
                constrNet='InceptionV1',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_big001_modif_RandInit_randomCrop_deepSupervision_ep200_LRschedG',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer=None,
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4) 
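# Minimal usage sketch (an addition, not part of the original snippet): the experiment
# functions in these examples are plain callables, so the baseline above can be launched
# directly, e.g. behind a main guard. learn_and_eval is assumed to be imported from
# StatsConstr_ClassifwithTL, as in Example #7 below.
if __name__ == '__main__':
    perf_OffTheshelf_InceptionV1_IconArt_ArtUK_baseline()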
Example #2
def Use_of_diff_features_VGG():
    
    ReDo = False
    for target_dataset in ['Paintings','IconArt_v1']:
        print('The following experiments should reproduce the performance of Crowley 2016 with VGG, a central crop and a grid search on the C parameter of the SVM, but with no image augmentation (multi-crop).')
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='LinearSVC',features='fc2',\
                       constrNet='VGG',kind_method='TL',gridSearch=True,ReDo=ReDo,cropCenter=True,verbose=True)
        # 67.1 & 50.6 & 93.0 & 74.6 & 61.3 & 70.2 & 56.1 & 78.8 & 67.1 & 85.5 & 70.5 \\ 
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='LinearSVC',features='fc1',\
                       constrNet='VGG',kind_method='TL',gridSearch=True,ReDo=ReDo,cropCenter=True,verbose=True)
    
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='LinearSVC',features='block5_pool',\
                       constrNet='VGG',kind_method='TL',gridSearch=True,ReDo=ReDo,cropCenter=True,
                       transformOnFinalLayer='GlobalAveragePooling2D',verbose=True)
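# A minimal sketch (an addition, not the original code): the three transfer-learning
# evaluations above differ only in the VGG layer used, so they can be expressed as a
# single loop. The keyword arguments are the same as in the calls above; the helper
# name Use_of_diff_features_VGG_loop is hypothetical.
def Use_of_diff_features_VGG_loop(ReDo=False):
    for target_dataset in ['Paintings', 'IconArt_v1']:
        for features, transform in [('fc2', None), ('fc1', None),
                                    ('block5_pool', 'GlobalAveragePooling2D')]:
            # Only pass transformOnFinalLayer when a pooling of the feature map is needed
            kwargs = {} if transform is None else {'transformOnFinalLayer': transform}
            learn_and_eval(target_dataset, source_dataset='ImageNet',
                           final_clf='LinearSVC', features=features,
                           constrNet='VGG', kind_method='TL', gridSearch=True,
                           ReDo=ReDo, cropCenter=True, verbose=True, **kwargs)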
Example #3
def compute_MutualInfo(target_dataset='Paintings'):
    """ The goal of this function is to compute the entropy and the mutual information 
    of the features in the ResNet model and the refined versions 
    We will compare BNRF, ROWD (mean of variance) and variance global in the case 
    of ResNet50 """

    matplotlib.use('Agg')  # Avoid figures popping up during execution

    nets = [
        'ResNet50', 'ResNet50_ROWD_CUMUL', 'ResNet50_ROWD_CUMUL',
        'ResNet50_BNRF'
    ]
    style_layers = getBNlayersResNet50()
    features = 'activation_48'
    normalisation = False
    final_clf = 'LinearSVC'  # Does not matter here
    source_dataset = 'ImageNet'
    kind_method = 'TL'
    transformOnFinalLayer = 'GlobalAveragePooling2D'
    computeGlobalVariance_tab = [False, False, True, False]
    cropCenter = True
    # Load ResNet50 normalisation statistics
    Model_dict = {}

    for constrNet, computeGlobalVariance in zip(nets,
                                                computeGlobalVariance_tab):
        str_model = constrNet
        if computeGlobalVariance:
            str_model += 'GlobalVar'
        print(str_model)
        Model_dict[str_model] = {}

        output = learn_and_eval(target_dataset,source_dataset,final_clf,features,\
                               constrNet,kind_method,style_layers=style_layers,
                               normalisation=normalisation,transformOnFinalLayer=transformOnFinalLayer,
                               batch_size_RF=16,epochs_RF=20,momentum=0.9,ReDo=False,
                               returnFeatures=True,cropCenter=cropCenter,\
                               computeGlobalVariance=computeGlobalVariance)
        Xtrainval, ytrainval, X_test, y_test = output
        _, num_classes = ytrainval.shape

        # Mutual Information
        for c in range(num_classes):
            print('For class', c)
            MI_trainval_c = mutual_info_classif(Xtrainval, ytrainval[:,c], discrete_features=False, n_neighbors=3, \
                                              copy=True, random_state=0)
            sum_MI_trainval_c = np.sum(MI_trainval_c)
            MI_test_c = mutual_info_classif(X_test, y_test[:,c], discrete_features=False, n_neighbors=3, \
                                              copy=True, random_state=0)
            sum_MI_test_c = np.sum(MI_test_c)
            Model_dict[str_model][c] = {}
            Model_dict[str_model][c]['trainval'] = sum_MI_trainval_c
            Model_dict[str_model][c]['test'] = sum_MI_test_c

    output_path = os.path.join(os.sep, 'media', 'gonthier', 'HDD2',
                               'output_exp')

    if os.path.isdir(output_path):
        output_path_full = os.path.join(output_path, 'Covdata')
    else:
        output_path_full = os.path.join('data', 'Covdata')
    filename_path = os.path.join(output_path_full,
                                 'MutualInfo_' + target_dataset + '.pkl')
    # Warning: this overwrites the same file every time
    with open(filename_path, 'wb') as handle:
        pickle.dump(Model_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)

    for c in range(num_classes):
        string = 'Class ' + str(c)
        for set_ in ['trainval', 'test']:
            strings = string + ' ' + set_
            for constrNet, computeGlobalVariance in zip(
                    nets, computeGlobalVariance_tab):
                str_model = constrNet
                if computeGlobalVariance:
                    str_model += 'GlobalVar'
                strings += ' ' + str_model + ' : '
                sum_MI = Model_dict[str_model][c][set_]
                strings += "{:.2E}".format(sum_MI)
            strings += '\n'
            print(strings)
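# Sketch only (an addition; assumes the same output layout as compute_MutualInfo above
# and that os and pickle are imported at module level): reload the pickled
# mutual-information results for later inspection. The name load_MutualInfo is hypothetical.
def load_MutualInfo(target_dataset='Paintings'):
    output_path = os.path.join(os.sep, 'media', 'gonthier', 'HDD2', 'output_exp')
    if os.path.isdir(output_path):
        output_path_full = os.path.join(output_path, 'Covdata')
    else:
        output_path_full = os.path.join('data', 'Covdata')
    filename_path = os.path.join(output_path_full,
                                 'MutualInfo_' + target_dataset + '.pkl')
    with open(filename_path, 'rb') as handle:
        return pickle.load(handle)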
Example #4
def compare_new_normStats_for_ResNet(target_dataset='Paintings',
                                     FineTunecase='All'):
    """ The goal of this function is to compare the new normalisation statistics of BN
    computed in the case of the adaptation of them 
    We will compare  ROWD (mean of variance) and variance global in the case 
    of ResNet50 
    
    We not use BNRF for the moment because it diverge
    """

    matplotlib.use('Agg')  # Avoid figures popping up during execution

    if FineTunecase == 'All2':
        nets = [
            'ResNet50', 'ResNet50', 'ResNet50', 'ResNet50', 'ResNet50',
            'ResNet50_ROWD_CUMUL', 'ResNet50_ROWD_CUMUL',
            'ResNet50_ROWD_CUMUL', 'ResNet50_ROWD_CUMUL_AdaIn'
        ]
        computeGlobalVariance_tab = [
            False, False, False, False, False, False, True, True, True
        ]
        kindmethod_tab = ['TL', 'FT', 'FT', 'FT', 'FT', 'TL', 'TL', 'FT', 'FT']
        pretrainingModif_tab = [
            False, 106, 106, 106, 106, False, False, False, False
        ]
        opt_option_tab = [[10**(-2)], [10**(-2)], [0.1, 10**(-2)], [10**(-3)],
                          [0.1, 10**(-3)], [10**(-2)], [10**(-2)], [10**(-2)],
                          [10**(-2)]]

    elif FineTunecase == 'All':
        nets = [
            'ResNet50', 'ResNet50', 'ResNet50', 'ResNet50_ROWD_CUMUL',
            'ResNet50_ROWD_CUMUL', 'ResNet50_ROWD_CUMUL',
            'ResNet50_ROWD_CUMUL_AdaIn'
        ]
        computeGlobalVariance_tab = [
            False, False, False, False, True, True, True
        ]
        kindmethod_tab = ['TL', 'FT', 'FT', 'TL', 'TL', 'FT', 'FT']
        pretrainingModif_tab = [False, 106, 106, False, False, False, False]
        opt_option_tab = [[10**(-2)], [10**(-2)], [0.1, 10**(-2)], [10**(-2)],
                          [10**(-2)], [10**(-2)], [10**(-2)]]

    elif FineTunecase == 'Cumul':
        nets = [
            'ResNet50', 'ResNet50_ROWD_CUMUL', 'ResNet50_ROWD_CUMUL',
            'ResNet50_ROWD_CUMUL_AdaIn'
        ]
        computeGlobalVariance_tab = [False, True, True, True]
        kindmethod_tab = ['TL', 'TL', 'FT', 'FT']
        pretrainingModif_tab = [False, False, False, False]
        opt_option_tab = [[10**(-2)], [10**(-2)], [10**(-2)], [10**(-2)]]

    elif FineTunecase == 'TL':
        nets = ['ResNet50', 'ResNet50_ROWD_CUMUL', 'ResNet50_ROWD_CUMUL']
        computeGlobalVariance_tab = [False, False, True]
        pretrainingModif_tab = [False, False, False]
        kindmethod_tab = ['TL', 'TL', 'TL']
        opt_option_tab = [[10**(-2)], [10**(-2)], [10**(-2)]]

    style_layers = getBNlayersResNet50()
    features = 'activation_48'
    normalisation = False
    final_clf = 'LinearSVC'  # Does not matter here (overridden by 'MLP2' below)
    source_dataset = 'ImageNet'
    transformOnFinalLayer = 'GlobalAveragePooling2D'

    final_clf = 'MLP2'
    epochs = 20
    optimizer = 'SGD'
    return_best_model = True
    batch_size = 16
    dropout = None
    regulOnNewLayer = None
    nesterov = False
    SGDmomentum = 0.0
    decay = 0.0
    cropCenter = True
    # Load ResNet50 normalisation statistics

    list_bn_layers = getBNlayersResNet50()

    Model_dict = {}
    list_markers = [
        'o', 's', 'X', '*', 'v', '^', '<', '>', 'd', '1', '2', '3', '4', '8',
        'h', 'H', 'p', 'd', '$f$', 'P'
    ]
    alpha = 0.7

    idex = 0
    for constrNet,computeGlobalVariance,kind_method,pretrainingModif,opt_option in \
        zip(nets,computeGlobalVariance_tab,kindmethod_tab,pretrainingModif_tab,opt_option_tab):
        print('loading :', constrNet, computeGlobalVariance, kind_method,
              pretrainingModif, opt_option)
        output = learn_and_eval(target_dataset,source_dataset,final_clf,features,\
                               constrNet,kind_method,style_layers=style_layers,
                               normalisation=normalisation,transformOnFinalLayer=transformOnFinalLayer,
                               batch_size_RF=16,epochs_RF=20,momentum=0.9,ReDo=False,
                               returnStatistics=True,cropCenter=cropCenter,\
                               computeGlobalVariance=computeGlobalVariance,\
                               epochs=epochs,optimizer=optimizer,opt_option=opt_option,return_best_model=return_best_model,\
                               batch_size=batch_size,gridSearch=False,verbose=False)

        if 'ResNet50_ROWD_CUMUL' == constrNet and kind_method == 'TL':
            dict_stats_target, list_mean_and_std_target = output
            # print('dict_stats_target',dict_stats_target)
            # input('wait')
        else:
            dict_stats_target,list_mean_and_std_target = extract_Norm_stats_of_ResNet(output,\
                                                    res_num_layers=50,model_type=constrNet)
        str_model = constrNet
        if computeGlobalVariance:
            str_model += 'GlobalVar'
        if kind_method == 'FT':
            str_model += ' FT '
            if len(opt_option) == 1:
                str_model += 'lr ' + str(opt_option[0])
            if len(opt_option) == 2:
                str_model += 'lrp ' + str(opt_option[0]) + ' lr ' + str(
                    opt_option[1])
        #str_model = str_model.replace('ResNet50_','')
        str_model += str(idex)
        idex += 1
        Model_dict[str_model] = dict_stats_target

    print('Plotting the statistics in :')
    output_path = os.path.join(os.sep,'media','gonthier','HDD2','output_exp','Covdata',\
                               target_dataset,'CompBNstats')
    pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)
    pltname = 'ResNet50_comparison_BN_statistics_ROWD'
    if cropCenter:
        pltname += '_cropCenter'
    pltname += '_' + FineTunecase
    pltname += '.pdf'
    pltname = os.path.join(output_path, pltname)
    print(pltname)
    pp = PdfPages(pltname)

    distances_means = {}
    distances_stds = {}
    ratios_means = {}
    ratios_stds = {}

    for layer_name in list_bn_layers:
        distances_means[layer_name] = []
        distances_stds[layer_name] = []
        ratios_means[layer_name] = []
        ratios_stds[layer_name] = []

        fig, (ax1, ax2) = plt.subplots(2, 1)
        str_title = 'Normalisation statistics ' + layer_name
        fig.suptitle(str_title)
        i = 0
        idex = 0
        for constrNet,computeGlobalVariance,kind_method,pretrainingModif,opt_option in \
            zip(nets,computeGlobalVariance_tab,kindmethod_tab,pretrainingModif_tab,opt_option_tab):

            str_model = constrNet
            if computeGlobalVariance:
                str_model += 'GlobalVar'
            if kind_method == 'FT':
                str_model += ' FT '
                if len(opt_option) == 1:
                    str_model += 'lr ' + str(opt_option[0])
                if len(opt_option) == 2:
                    str_model += 'lrp ' + str(opt_option[0]) + ' lr ' + str(
                        opt_option[1])
            label = str_model.replace('ResNet50_', '')
            str_model += str(idex)
            idex += 1
            dict_stats_target = Model_dict[str_model]
            stats_target = dict_stats_target[layer_name]
            means, stds = stats_target
            if constrNet == 'ResNet50' and kind_method == 'TL':
                ref_means = means
                ref_stds = stds
            else:
                diff_means = np.abs(ref_means - means)
                diff_stds = np.abs(ref_stds - stds)
                ratio_means = np.abs(means / ref_means)
                ratio_stds = np.abs(stds / ref_stds)
                distances_means[layer_name] += [diff_means]
                distances_stds[layer_name] += [diff_stds]
                ratios_means[layer_name] += [ratio_means]
                ratios_stds[layer_name] += [ratio_stds]
            x = np.arange(0, len(means))
            ax1.scatter(x,
                        means,
                        label=label,
                        marker=list_markers[i],
                        alpha=alpha)
            ax1.set_title('Normalisation Means')
            ax1.set_xlabel('Channel')
            ax1.set_ylabel('Mean')
            ax1.tick_params(axis='both', which='major', labelsize=3)
            ax1.tick_params(axis='both', which='minor', labelsize=3)
            ax1.legend(loc='best', prop={'size': 4})
            ax2.scatter(x,
                        stds,
                        label=label,
                        marker=list_markers[i],
                        alpha=alpha)
            ax2.set_title('Normalisation STDs')
            ax2.set_xlabel('Channel')
            ax2.set_ylabel('Std')
            ax2.tick_params(axis='both', which='major', labelsize=3)
            ax2.tick_params(axis='both', which='minor', labelsize=3)
            ax2.legend(loc='best', prop={'size': 4})
            i += 1

        #plt.show()
        plt.savefig(pp, format='pdf')
        plt.close()

    # Plot the boxplot of the distance between normalisation statistics
    # fig = plt.figure()
    # ax = plt.axes()
    # set_xticks= []
    # c = ['C1','C2','C3']
    # c = ['orange','green','red']
    # for i,layer_name in enumerate(list_bn_layers):
    #     positions = [i*3,i*3+1,i*3+2]
    #     set_xticks += [i*3+1]
    #     bp = plt.boxplot(np.log(distances_means[layer_name]).tolist(), positions = positions,
    #                      widths = 0.6,notch=True, patch_artist=True)
    #     for patch, color in zip(bp['boxes'], c):
    #         patch.set_facecolor(color)
    # ax.set_xticklabels(list_bn_layers)
    # ax.set_xticks(set_xticks)
    # plt.setp( ax.xaxis.get_majorticklabels(), rotation='vertical')
    # hO, = plt.plot([1,1],'C1-')
    # hG, = plt.plot([1,1],'C2-')
    # hR, = plt.plot([1,1],'C3-')
    # plt.title('Log Abs distance between means of refined and orignal.', fontsize=10)
    # plt.legend((hO, hG,hR),('ROWD','ROWD_global', 'BNRF'))
    # hO.set_visible(False)
    # hG.set_visible(False)
    # hR.set_visible(False)
    # plt.savefig(pp, format='pdf')
    # plt.close()

    # fig = plt.figure()
    # ax = plt.axes()
    # set_xticks= []

    # for i,layer_name in enumerate(list_bn_layers):
    #     positions = [i*3,i*3+1,i*3+2]
    #     set_xticks += [i*3+1]
    #     bp = plt.boxplot(np.log(distances_stds[layer_name]).tolist(), positions = positions,
    #                      widths = 0.6,notch=True, patch_artist=True)
    #     for patch, color in zip(bp['boxes'], c):
    #         patch.set_facecolor(color)
    # ax.set_xticklabels(list_bn_layers)
    # ax.set_xticks(set_xticks)
    # plt.setp( ax.xaxis.get_majorticklabels(), rotation='vertical')
    # hO, = plt.plot([1,1],'C1-')
    # hG, = plt.plot([1,1],'C2-')
    # hR, = plt.plot([1,1],'C3-')
    # plt.title('Log Abs distance between  stds of refined and orignal.', fontsize=10)
    # plt.legend((hO, hG,hR),('ROWD','ROWD_global', 'BNRF'))
    # hO.set_visible(False)
    # hG.set_visible(False)
    # hR.set_visible(False)
    # plt.savefig(pp, format='pdf')
    # plt.close()

    # # Plot the boxplot of the ratio between normalisation statistics
    # fig = plt.figure()
    # ax = plt.axes()
    # set_xticks= []
    # c = ['C1','C2','C3']
    # c = ['orange','green','red']
    # for i,layer_name in enumerate(list_bn_layers):
    #     positions = [i*3,i*3+1,i*3+2]
    #     set_xticks += [i*3+1]
    #     bp = plt.boxplot(np.log(1.+np.array(ratios_means[layer_name])).tolist(), positions = positions,
    #                      widths = 0.6,notch=True, patch_artist=True)
    #     for patch, color in zip(bp['boxes'], c):
    #         patch.set_facecolor(color)
    # ax.set_xticklabels(list_bn_layers)
    # ax.set_xticks(set_xticks)
    # plt.setp( ax.xaxis.get_majorticklabels(), rotation='vertical')
    # hO, = plt.plot([1,1],'C1-')
    # hG, = plt.plot([1,1],'C2-')
    # hR, = plt.plot([1,1],'C3-')
    # plt.title('Log 1+ Ratio between means of refined and orignal.', fontsize=10)
    # plt.legend((hO, hG,hR),('ROWD','ROWD_global', 'BNRF'))
    # hO.set_visible(False)
    # hG.set_visible(False)
    # hR.set_visible(False)
    # plt.savefig(pp, format='pdf')
    # plt.close()

    # fig = plt.figure()
    # ax = plt.axes()
    # set_xticks= []

    # for i,layer_name in enumerate(list_bn_layers):
    #     positions = [i*3,i*3+1,i*3+2]
    #     set_xticks += [i*3+1]
    #     bp = plt.boxplot(np.log(1.+np.array(ratios_stds[layer_name])).tolist(), positions = positions,
    #                      widths = 0.6,notch=True, patch_artist=True)
    #     for patch, color in zip(bp['boxes'], c):
    #         patch.set_facecolor(color)
    # ax.set_xticklabels(list_bn_layers)
    # ax.set_xticks(set_xticks)
    # plt.setp( ax.xaxis.get_majorticklabels(), rotation='vertical')
    # hO, = plt.plot([1,1],'C1-')
    # hG, = plt.plot([1,1],'C2-')
    # hR, = plt.plot([1,1],'C3-')
    # plt.title('Log 1+ ratio between stds of Refined model and original', fontsize=10)
    # plt.legend((hO, hG,hR),('ROWD','ROWD_global', 'BNRF'))
    # hO.set_visible(False)
    # hG.set_visible(False)
    # hR.set_visible(False)
    # plt.savefig(pp, format='pdf')
    # plt.close()

    pp.close()
    plt.clf()
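# Sketch (an addition, not part of the original function): the distances_means and
# distances_stds collected above could also be summarised numerically instead of with
# the commented-out boxplots. Assumes numpy is imported as np, as in the function
# above; the helper name is hypothetical.
def summarize_norm_stat_distances(distances_means, distances_stds, list_bn_layers):
    for layer_name in list_bn_layers:
        if len(distances_means[layer_name]) == 0:
            continue
        # Mean absolute gap to the original ImageNet statistics, over all channels and models
        mean_abs_diff_mu = np.mean(np.concatenate(distances_means[layer_name]))
        mean_abs_diff_std = np.mean(np.concatenate(distances_stds[layer_name]))
        print(layer_name, 'mean |diff means| : {:.2E}'.format(mean_abs_diff_mu),
              'mean |diff stds| : {:.2E}'.format(mean_abs_diff_std))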
Example #5
def compare_Statistics_inFirstLayer_for_ResNet(target_dataset='Paintings'):
    """ The goal of this function is to compare the statistics of the features maps 
    between the base ResNet50 and the ResNet50_ROWD for Some set of ImageNet and ArtUK paintings 
    We will plot the histogram of all the possible values of each of the features maps for some model
    """

    output_path = os.path.join(os.sep, 'media', 'gonthier', 'HDD2',
                               'output_exp', 'Covdata', 'HistoOfAllValuesFM')
    pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)

    matplotlib.use('Agg')  # Avoid figures popping up during execution
    number_im_considered = 2  #10000
    nets = ['ResNet50', 'ResNet50_ROWD_CUMUL']
    style_layers = getBNlayersResNet50()
    features = 'activation_48'
    normalisation = False
    getBeforeReLU = False
    final_clf = 'LinearSVC'  # Does not matter here
    source_dataset = 'ImageNet'
    kind_method = 'TL'
    transformOnFinalLayer = 'GlobalAveragePooling2D'
    computeGlobalVariance_tab = [False, True]
    cropCenter = True
    saveformat = 'h5'
    # Load ResNet50 normalisation statistics

    list_bn_layers = getBNlayersResNet50()

    list_of_concern_layers = ['conv1', 'bn_conv1', 'activation']

    Model_dict = {}
    list_markers = ['o', 's', 'X', '*']
    alpha = 0.7

    dict_of_dict_hist = {}
    dict_of_dict = {}

    for constrNet, computeGlobalVariance in zip(nets,
                                                computeGlobalVariance_tab):
        output = learn_and_eval(target_dataset,source_dataset,final_clf,features,\
                               constrNet,kind_method,style_layers=style_layers,
                               normalisation=normalisation,transformOnFinalLayer=transformOnFinalLayer,
                               batch_size_RF=16,epochs_RF=20,momentum=0.9,ReDo=False,
                               returnStatistics=True,cropCenter=cropCenter,\
                               computeGlobalVariance=computeGlobalVariance)
        if 'ROWD' in constrNet:
            dict_stats_target, list_mean_and_std_target = output
            # Need to create the model
            model = ResNet_BaseNormOnlyOnBatchNorm_ForFeaturesExtraction(
                                   style_layers,list_mean_and_std_target=list_mean_and_std_target,\
                                   final_layer=features,\
                                   transformOnFinalLayer=transformOnFinalLayer,res_num_layers=50,\
                                   weights='imagenet')
        else:
            model = output  # In the case of ResNet50

        output_of_first_layer_net = get_those_layers_output(
            model, list_of_concern_layers)

        if constrNet == 'ResNet50':
            dataset_used = ['ImageNet', 'Paintings']
            set_used = [None, 'trainval']
        elif constrNet == 'ResNet50_ROWD_CUMUL':
            dataset_used = ['Paintings', 'Paintings']
            set_used = ['trainval', 'test']

        for dataset, set_ in zip(dataset_used, set_used):

            Netdatasetfull = constrNet + dataset + str(set_)
            list_imgs, images_in_set, number_im_list = get_list_im(dataset,
                                                                   set=set_)
            if not (number_im_considered is None):
                if number_im_considered >= number_im_list:
                    number_im_considered_tmp = None
                else:
                    number_im_considered_tmp = number_im_considered
            else:
                number_im_considered_tmp = None
            str_layers = getResNetLayersNumeral_bitsVersion(style_layers)
            filename = dataset + '_' + str(number_im_considered_tmp) + '_Hist4Params' +\
                '_'+str_layers
            if not (set_ == '' or set_ is None):
                filename += '_' + set_
            if getBeforeReLU:
                filename += '_BeforeReLU'
            if cropCenter:
                filename += '_cropCenter'
            if computeGlobalVariance:
                filename += '_computeGlobalVariance'
            if saveformat == 'pkl':
                filename += '.pkl'
            if saveformat == 'h5':
                filename += '.h5'
            filename_path = os.path.join(output_path, filename)
            if not os.path.isfile(filename_path):
                dict_var,dict_histo = Precompute_Cumulated_Hist_4Moments(filename_path,model_toUse=output_of_first_layer_net,\
                                                Net=constrNet,\
                                                list_of_concern_layers=list_of_concern_layers,number_im_considered=number_im_considered,\
                                                dataset=dataset,set=set_,saveformat=saveformat,cropCenter=cropCenter)

            else:
                print('We will load the features ')
                dict_var, dict_histo = load_Cumulated_Hist_4Moments(
                    filename_path, list_of_concern_layers, dataset)
            dict_of_dict[Netdatasetfull] = dict_var
            dict_of_dict_hist[Netdatasetfull] = dict_histo

    print('Start plotting ')
    # Plot the histograms (one per kernel for the different layers and save all in a pdf file)
    pltname = 'Hist_of_AllFeaturesMaps'
    labels = []
    dataset_tab = []
    for constrNet, computeGlobalVariance in zip(nets,
                                                computeGlobalVariance_tab):
        if constrNet == 'ResNet50':
            dataset_used = ['ImageNet', 'Paintings']
            set_used = [None, 'trainval']
        elif constrNet == 'ResNet50_ROWD_CUMUL':
            dataset_used = ['Paintings', 'Paintings']
            set_used = ['trainval', 'test']

        for dataset, set_ in zip(dataset_used, set_used):
            Netdatasetfull = constrNet + dataset + str(set_)
            dataset_tab += [Netdatasetfull]
            labels += [Netdatasetfull]
    pltname += str(number_im_considered)
    if getBeforeReLU:
        pltname += '_BeforeReLU'
    if cropCenter:
        pltname += '_cropCenter'

    pltname += '.pdf'
    pltname = os.path.join(output_path, pltname)
    pp = PdfPages(pltname)

    alpha = 0.7
    colors_full = ['red', 'green', 'blue', 'purple', 'orange', 'pink']
    colors = colors_full[0:len(dataset_tab)]

    #    style_layers = [style_layers[0]]

    # Turn interactive plotting off
    plt.ioff()

    for l, layer in enumerate(list_of_concern_layers):
        print("Layer", layer)
        num_features = 64  # Need to generalize that

        number_img_w = 4
        number_img_h = 4
        num_pages = num_features // (number_img_w * number_img_h)
        for p in range(num_pages):
            #f = plt.figure()  # Do I need this ?
            axes = []
            gs00 = gridspec.GridSpec(number_img_h, number_img_w)
            for j in range(number_img_w * number_img_h):
                ax = plt.subplot(gs00[j])
                axes += [ax]
            for k, ax in enumerate(axes):
                f_k = k + p * number_img_w * number_img_h

                n_bins = 1000
                for d, Netdatasetfull in enumerate(dataset_tab):
                    dict_k = dict_of_dict_hist[Netdatasetfull][layer]
                    xtab, ytab = dict_k[f_k]
                    xtab_density = xtab / np.sum(xtab)  # normalise the counts so they sum to 1
                    im = ax.bar(xtab_density, ytab, color=colors[d],
                                alpha=alpha, label=labels[d])
                ax.tick_params(axis='both', which='major', labelsize=3)
                ax.tick_params(axis='both', which='minor', labelsize=3)
                ax.legend(loc='upper right', prop={'size': 2})
            titre = layer + ' ' + str(p)
            plt.suptitle(titre)

            #gs0.tight_layout(f)
            plt.savefig(pp, format='pdf')
            plt.close()
    pp.close()
    plt.clf()
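# Sketch of the compute-or-load caching pattern used in the example above (the
# compute_fn / load_fn names are placeholders, not the project's API): the expensive
# per-image statistics are recomputed only when the cache file is missing.
def compute_or_load(filename_path, compute_fn, load_fn):
    if not os.path.isfile(filename_path):
        return compute_fn(filename_path)  # computes the statistics and writes the cache file
    print('We will load the features ')
    return load_fn(filename_path)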
Example #6
def compare_Models_Plot_HistoDistrib_for_ResNet(target_dataset='Paintings'):
    """ The goal of this function is to compare the models fine tuned of with some statistics imposed
    
    We will compare , ROWD (mean of variance) and variance global in the case 
    of ResNet50 TL of FT """

    matplotlib.use('Agg')  # Avoid figures popping up during execution
    dataset = target_dataset
    nets = [
        'ResNet50', 'ResNet50_ROWD_CUMUL', 'ResNet50_ROWD_CUMUL',
        'ResNet50_BNRF'
    ]
    nets = [
        'ResNet50', 'ResNet50', 'ResNet50', 'ResNet50_ROWD_CUMUL',
        'ResNet50_ROWD_CUMUL'
    ]
    nets = [
        'ResNet50', 'ResNet50', 'ResNet50', 'ResNet50_ROWD_CUMUL',
        'ResNet50_ROWD_CUMUL', 'ResNet50_ROWD_CUMUL',
        'ResNet50_ROWD_CUMUL_AdaIn'
    ]
    #nets = ['ResNet50','ResNet50_ROWD_CUMUL','ResNet50_ROWD_CUMUL']
    style_layers = getBNlayersResNet50()
    features = 'activation_48'
    normalisation = False
    final_clf = 'LinearSVC'  # Does not matter here (overridden by 'MLP2' below)
    source_dataset = 'ImageNet'
    kind_method = 'TL'
    transformOnFinalLayer = 'GlobalAveragePooling2D'
    computeGlobalVariance_tab = [False, False, False, False, True]
    computeGlobalVariance_tab = [False, False, False, False, True, True, True]
    #computeGlobalVariance_tab = [False,False,True]
    pretrainingModif_tab = [False, 106, 106, False, False]
    pretrainingModif_tab = [False, 106, 106, False, False, False, False]
    #pretrainingModif_tab = [False,False,False]
    kindmethod_tab = ['TL', 'FT', 'FT', 'TL', 'TL']
    kindmethod_tab = ['TL', 'FT', 'FT', 'TL', 'TL', 'FT', 'FT']
    #kindmethod_tab = ['TL','TL','TL']

    final_clf = 'MLP2'
    epochs = 20
    optimizer = 'SGD'
    opt_option_tab = [[10**(-2)], [10**(-2)], [0.1, 10**(-2)], [10**(-2)],
                      [10**(-2)]]
    opt_option_tab = [[10**(-2)], [10**(-2)], [0.1, 10**(-2)], [10**(-2)],
                      [10**(-2)], [10**(-2)], [10**(-2)]]
    return_best_model = True
    batch_size = 16

    cropCenter = True
    # Load ResNet50 normalisation statistics

    list_bn_layers = getBNlayersResNet50()

    Model_dict = {}
    Model_dict_histo = {}
    dict_num_fS = {}
    list_markers = [
        'o', 's', 'X', '*', 'v', '^', '<', '>', 'd', '1', '2', '3', '4', '8',
        'h', 'H', 'p', 'd', '$f$', 'P'
    ]
    alpha = 0.5
    number_im_considered = None
    #number_im_considered = 2

    ArtUKlist_imgs, images_in_set, number_im_list = get_list_im(dataset,
                                                                set='')
    if not (number_im_considered is None):
        if number_im_considered >= number_im_list:
            number_im_considered_tmp = None
        else:
            number_im_considered_tmp = number_im_considered
    else:
        number_im_considered_tmp = None

    idex = 0

    list_of_concern_layers = ['conv1', 'bn_conv1', 'activation']
    set = 'trainval'
    saveformat = 'h5'
    cropCenter = True
    histoFixe = True
    bins_tab = [
        np.arange(-500, 501),
        np.arange(-500, 501) / 250,
        np.arange(-500, 501) / 250
    ]
    width_tab = [1, 1. / 250, 1. / 250]
    #bins=np.arange(-500,501)

    output_path = os.path.join(os.sep, 'media', 'gonthier', 'HDD2',
                               'output_exp', 'Covdata')
    pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)

    for constrNet,computeGlobalVariance,kind_method,pretrainingModif,opt_option in \
        zip(nets,computeGlobalVariance_tab,kindmethod_tab,pretrainingModif_tab,opt_option_tab):
        print('loading :', constrNet)
        output = learn_and_eval(target_dataset,source_dataset,final_clf,features,\
                               constrNet,kind_method,style_layers=style_layers,
                               normalisation=normalisation,transformOnFinalLayer=transformOnFinalLayer,
                               batch_size_RF=16,epochs_RF=20,momentum=0.9,ReDo=False,
                               returnStatistics=True,cropCenter=cropCenter,\
                               computeGlobalVariance=computeGlobalVariance,\
                               epochs=epochs,optimizer=optimizer,opt_option=opt_option,return_best_model=return_best_model,\
                               batch_size=batch_size,gridSearch=False)

        if 'ResNet50_ROWD_CUMUL' == constrNet and kind_method == 'TL':
            dict_stats_target, list_mean_and_std_target = output
            list_mean_and_std_source = None
            target_number_im_considered = None
            whatToload = 'varmean'
            target_set = 'trainval'

            model_toUse = ResNet_BaseNormOnlyOnBatchNorm_ForFeaturesExtraction(
                           style_layers,list_mean_and_std_target=list_mean_and_std_target,\
                           final_layer=features,\
                           transformOnFinalLayer='GlobalMaxPooling2D',res_num_layers=50,\
                           weights='imagenet')

        else:
            model_toUse = output

        Net = constrNet
        str_model = constrNet
        if computeGlobalVariance:
            str_model += 'GlobalVar'
        if kind_method == 'FT':
            str_model += ' FT '
            if len(opt_option) == 1:
                str_model += 'lr ' + str(opt_option[0])
            if len(opt_option) == 2:
                str_model += 'lrp ' + str(opt_option[0]) + ' lr ' + str(
                    opt_option[1])
        #str_model = str_model.replace('ResNet50_','')

        filename_path = dataset + '_' + str(
            number_im_considered_tmp) + '_CovMean' + str_model
        for l in list_of_concern_layers:
            filename_path += '_' + l
        if cropCenter:
            filename_path += '_cropCenter'
        if histoFixe:
            filename_path += '_histoFixe'

        if saveformat == 'pkl':
            filename_path += '.pkl'
        if saveformat == 'h5':
            filename_path += '.h5'

        filename_path = os.path.join(output_path, filename_path)

        str_model += str(idex)
        idex += 1
        if not os.path.isfile(filename_path):
            dict_var,dict_histo,dict_num_f = Precompute_Cumulated_Hist_4Moments(filename_path,model_toUse,Net,list_of_concern_layers,number_im_considered_tmp,\
                        dataset,set=set,saveformat=saveformat,cropCenter=cropCenter,histoFixe=histoFixe,\
                        bins_tab=bins_tab)
        else:
            dict_var, dict_histo, dict_num_f = load_Cumulated_Hist_4Moments(
                filename_path,
                list_of_concern_layers,
                dataset,
                saveformat='h5')
        Model_dict[str_model] = dict_var
        Model_dict_histo[str_model] = dict_histo
        dict_num_fS[str_model] = dict_num_f

    # Start plotting

    output_path = os.path.join(os.sep,'media','gonthier','HDD2','output_exp','Covdata',\
                               dataset,'CompModels')
    pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)
    pltname = 'ResNetS_Hist_of_featuresValues' + dataset + '_'
    if cropCenter:
        pltname += '_cropCenter'
    pltname += '.pdf'
    pltname = os.path.join(output_path, pltname)
    pp = PdfPages(pltname)

    # Turn interactive plotting off
    plt.ioff()

    num_features = 64

    # number_img_w = len(nets)
    # number_img_h= len(list_of_concern_layers)
    num_pages = num_features

    for p in range(num_pages):
        print('Start plotting histo ', p)

        # Plot All the model on one figure
        number_img_w = 1
        number_img_h = len(list_of_concern_layers) + 1
        axes = []
        gs00 = gridspec.GridSpec(number_img_h, number_img_w)
        for j in range(number_img_w * number_img_h):
            ax = plt.subplot(gs00[j])
            axes += [ax]
        for l, z in enumerate(zip(list_of_concern_layers, bins_tab,
                                  width_tab)):
            layer, bins, width = z

            idex = 0
            hist_list_l_p = []
            labels = []
            for constrNet,computeGlobalVariance,kind_method,pretrainingModif,opt_option in \
                zip(nets,computeGlobalVariance_tab,kindmethod_tab,pretrainingModif_tab,opt_option_tab):
                Net = constrNet
                str_model = constrNet
                if computeGlobalVariance:
                    str_model += 'GlobalVar'
                if kind_method == 'FT':
                    str_model += ' FT '
                    if len(opt_option) == 1:
                        str_model += 'lr ' + str(opt_option[0])
                    if len(opt_option) == 2:
                        str_model += 'lrp ' + str(
                            opt_option[0]) + ' lr ' + str(opt_option[1])
                label = str_model.replace('ResNet50_', '')

                str_model += str(idex)
                idex += 1

                hists = Model_dict_histo[str_model][layer]
                hist_list_l_p += [hists[p]]
                labels += [label]
                #num_features = dict_num_fS[str_model][layer]

            ax = axes[l]
            binsMinusOne = bins[0:-1] + width / 2
            for n in range(len(hist_list_l_p)):
                ax.bar(binsMinusOne,
                       hist_list_l_p[n],
                       width=width,
                       label=labels[n],
                       alpha=alpha)
            ax.tick_params(axis='both', which='major', labelsize=3)
            ax.tick_params(axis='both', which='minor', labelsize=3)
            ax.legend(loc='upper right', prop={'size': 2})
            # fig_title = "m: {:.2E}, v: {:.2E}, s: {:.2E}, k: {:.2E}".format(m,v,s,k)
            fig_title = layer
            ax.set_title(fig_title, fontsize=5)
            if layer == 'activation':
                ax = axes[l + 1]

                binsMinusOne = bins[0:-1] + width / 2
                for n in range(len(hist_list_l_p)):
                    hist_without0 = np.copy(hist_list_l_p[n])  # copy so the stored histogram is not modified in place
                    hist_without0[500] = 0
                    hist_without0[501] = 0
                    ax.bar(binsMinusOne,
                           hist_without0,
                           width=width,
                           label=labels[n],
                           alpha=alpha)
                ax.tick_params(axis='both', which='major', labelsize=3)
                ax.tick_params(axis='both', which='minor', labelsize=3)
                ax.legend(loc='upper right', prop={'size': 2})
                # fig_title = "m: {:.2E}, v: {:.2E}, s: {:.2E}, k: {:.2E}".format(m,v,s,k)
                fig_title = layer + 'w/o 0'
                ax.set_title(fig_title, fontsize=5)
        titre = 'Feature ' + str(p)
        plt.suptitle(titre)
        plt.savefig(pp, format='pdf')
        plt.close()

        # Plot each model on one separated figure
        number_img_w = len(nets)
        number_img_h = len(list_of_concern_layers) + 1
        axes = []
        gs00 = gridspec.GridSpec(number_img_h, number_img_w)
        for j in range(number_img_w * number_img_h):
            ax = plt.subplot(gs00[j])
            axes += [ax]
        for l, z in enumerate(zip(list_of_concern_layers, bins_tab,
                                  width_tab)):
            layer, bins, width = z

            idex = 0
            hist_list_l_p = []
            #labels = []
            nn = 0
            for constrNet,computeGlobalVariance,kind_method,pretrainingModif,opt_option in \
                zip(nets,computeGlobalVariance_tab,kindmethod_tab,pretrainingModif_tab,opt_option_tab):
                Net = constrNet
                str_model = constrNet
                if computeGlobalVariance:
                    str_model += 'GlobalVar'
                if kind_method == 'FT':
                    str_model += ' FT '
                    if len(opt_option) == 1:
                        str_model += 'lr ' + str(opt_option[0])
                    if len(opt_option) == 2:
                        str_model += 'lrp ' + str(
                            opt_option[0]) + ' lr ' + str(opt_option[1])
                label = str_model.replace('ResNet50_', '')

                str_model += str(idex)
                idex += 1

                hists = Model_dict_histo[str_model][layer][p]
                ax = axes[l * number_img_w + nn]

                binsMinusOne = bins[0:-1] + width / 2
                ax.bar(binsMinusOne,
                       hists,
                       width=width,
                       label=label,
                       alpha=alpha)
                ax.tick_params(axis='both', which='major', labelsize=3)
                ax.tick_params(axis='both', which='minor', labelsize=3)
                ax.legend(loc='upper right', prop={'size': 2})
                # fig_title = "m: {:.2E}, v: {:.2E}, s: {:.2E}, k: {:.2E}".format(m,v,s,k)
                fig_title = layer
                ax.set_title(fig_title, fontsize=5)
                if layer == 'activation':
                    ax = axes[(l + 1) * number_img_w + nn]
                    binsMinusOne = bins[0:-1] + width / 2
                    hist_without0 = np.copy(hists)  # copy so the stored histogram is not modified in place
                    hist_without0[500] = 0
                    hist_without0[501] = 0
                    ax.bar(binsMinusOne,
                           hist_without0,
                           width=width,
                           label=label,
                           alpha=alpha)
                    ax.tick_params(axis='both', which='major', labelsize=3)
                    ax.tick_params(axis='both', which='minor', labelsize=3)
                    ax.legend(loc='upper right', prop={'size': 2})
                    # fig_title = "m: {:.2E}, v: {:.2E}, s: {:.2E}, k: {:.2E}".format(m,v,s,k)
                    fig_title = layer + 'w/o 0'
                    ax.set_title(fig_title, fontsize=5)
                nn += 1
        titre = 'Feature ' + str(p)
        plt.suptitle(titre)
        plt.savefig(pp, format='pdf')
        plt.close()
    pp.close()
    plt.clf()
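# Quick illustrative check (an addition, not part of the original script) of the
# fixed-bin setup used in compare_Models_Plot_HistoDistrib_for_ResNet above:
# np.arange(-500, 501) / 250 gives 1001 edges spanning [-2, 2] in steps of 1/250,
# and bins[0:-1] + width / 2 gives the 1000 bar centres passed to ax.bar.
import numpy as np

bins = np.arange(-500, 501) / 250
width = 1. / 250
centres = bins[0:-1] + width / 2
assert len(centres) == 1000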
Example #7
# -*- coding: utf-8 -*-
"""
Created on Fri Jul  3 11:28:54 2020

Test of my reimplementation of Wildcat pooling 

@author: gonthier
"""

from StatsConstr_ClassifwithTL import learn_and_eval

learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='wildcat',
               features='activation_48',\
                constrNet='ResNet50',kind_method='FT',pretrainingModif=True,\
                optimizer='SGD',opt_option=[0.01],return_best_model=True,
                epochs=20,cropCenter=True,verbose=True,SaveInit=False,
                transformOnFinalLayer='noflatten')

learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='wildcat',
               features='activation_48',\
                constrNet='ResNet50',kind_method='FT',pretrainingModif=True,\
                optimizer='SGD',opt_option=[0.1,0.01],return_best_model=True,
                epochs=20,cropCenter=True,verbose=True,SaveInit=False,
                transformOnFinalLayer='noflatten',batch_size=16)

learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='wildcat',
               features='activation_48',\
                constrNet='ResNet50',kind_method='FT',pretrainingModif=True,\
                optimizer='SGD',opt_option=[0.01,0.1],return_best_model=True,
                epochs=20,cropCenter=False,verbose=True,SaveInit=False,
                transformOnFinalLayer='noflatten',batch_size=16)
Example #8
def Use_of_diff_final_model_ResNet_VGG_InceptionV1():

    ReDo = False    

    for target_dataset in ['Paintings','IconArt_v1']:
    
        print('Same experiment with ResNet50 ')
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='LinearSVC',features='activation_48',\
                       transformOnFinalLayer='GlobalAveragePooling2D',
                       constrNet='ResNet50',kind_method='TL',gridSearch=True,
                       ReDo=ReDo,cropCenter=True,verbose=True)
        # ResNet50 Block1-5\_conv1 activation\_48 GlobalAveragePooling2D LinearSVCGS 
        # & 71.1 & 48.3 & 92.9 & 75.8 & 64.4 & 72.5 & 56.6 & 80.7 & 70.5 & 88.5 & 72.2 \\ 
        
        print('Same experiment with ResNet50 but a MLP1')
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='MLP1',features='activation_48',\
                       constrNet='ResNet50',kind_method='TL',gridSearch=False,ReDo=ReDo,\
                       transformOnFinalLayer='GlobalAveragePooling2D',cropCenter=True,
                       opt_option=[0.01],verbose=True,return_best_model=True)    
        
        print('Same experiment with ResNet50 but a MLP2')
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='MLP2',features='activation_48',\
                       constrNet='ResNet50',kind_method='TL',gridSearch=False,ReDo=ReDo,\
                       transformOnFinalLayer='GlobalAveragePooling2D',cropCenter=True,
                       opt_option=[0.01],verbose=True,return_best_model=True)  
        # & 57.3 & 34.0 & 89.7 & 68.9 & 51.5 & 62.4 & 45.9 & 72.9 & 60.5 & 77.1 & 62.0 \\
        
        print('Same experiment with ResNet50 but a MLP3 with decay etc')
        # 72.4 AP on Paintings: to be verified because there was a problem with the crop-center function
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='MLP3',features='activation_48',\
                       constrNet='ResNet50',kind_method='TL',gridSearch=False,ReDo=ReDo,\
                       transformOnFinalLayer='GlobalAveragePooling2D',cropCenter=True,\
                       regulOnNewLayer='l2',optimizer='SGD',opt_option=[0.01],\
                       epochs=20,nesterov=True,SGDmomentum=0.99,decay=0.0005,verbose=True,return_best_model=True)  
            
            
            
        print('Same experiment with VGG ')
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='LinearSVC',features='block5_pool',\
                       transformOnFinalLayer='GlobalAveragePooling2D',
                       constrNet='VGG',kind_method='TL',gridSearch=True,
                       ReDo=ReDo,cropCenter=True,verbose=True)
        
        print('Same experiment with VGG but a MLP1')
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='MLP1',features='block5_pool',\
                       constrNet='VGG',kind_method='TL',gridSearch=False,ReDo=ReDo,\
                       transformOnFinalLayer='GlobalAveragePooling2D',cropCenter=True,
                       opt_option=[0.01],verbose=True,return_best_model=True)      
        
        print('Same experiment with VGG but a MLP2')
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='MLP2',features='block5_pool',\
                       constrNet='VGG',kind_method='TL',gridSearch=False,ReDo=ReDo,\
                       transformOnFinalLayer='GlobalAveragePooling2D',cropCenter=True,
                       opt_option=[0.01],verbose=True,return_best_model=True)  
        # & 57.3 & 34.0 & 89.7 & 68.9 & 51.5 & 62.4 & 45.9 & 72.9 & 60.5 & 77.1 & 62.0 \\
        
        print('Same experiment with VGG but a MLP3 with decay etc')
        # 72.4 AP on Paintings: to be verified because there was a problem with the crop-center function
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='MLP3',features='block5_pool',\
                       constrNet='VGG',kind_method='TL',gridSearch=False,ReDo=ReDo,\
                       transformOnFinalLayer='GlobalAveragePooling2D',cropCenter=True,\
                       regulOnNewLayer='l2',optimizer='SGD',opt_option=[0.01],\
                       epochs=20,nesterov=True,SGDmomentum=0.99,decay=0.0005,verbose=True,return_best_model=True)  
            
        print('Same experiment with InceptionV1 ')
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='LinearSVC',features='avgpool',\
                       transformOnFinalLayer='',
                       constrNet='InceptionV1',kind_method='TL',gridSearch=True,
                       ReDo=ReDo,cropCenter=True,verbose=True,return_best_model=True)  
        
        print('Same experiment with InceptionV1 but a MLP1')
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='MLP1',features='avgpool',\
                       constrNet='InceptionV1',kind_method='TL',gridSearch=False,ReDo=ReDo,\
                       transformOnFinalLayer='',cropCenter=True,
                       opt_option=[0.01],verbose=True,return_best_model=True)  
            
        print('Same experiment with InceptionV1 but a MLP2')
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='MLP2',features='avgpool',\
                       constrNet='InceptionV1',kind_method='TL',gridSearch=False,ReDo=ReDo,\
                       transformOnFinalLayer='',cropCenter=True,
                       opt_option=[0.01],verbose=True,return_best_model=True)  
        # & 57.3 & 34.0 & 89.7 & 68.9 & 51.5 & 62.4 & 45.9 & 72.9 & 60.5 & 77.1 & 62.0 \\
        
        print('Same experiment with InceptionV1 but a MLP3 with decay etc')
        # 72.4 AP on Paintings: to be verified because there was a problem with the crop-center function
        learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='MLP3',features='avgpool',\
                       constrNet='InceptionV1',kind_method='TL',gridSearch=False,ReDo=ReDo,\
                       transformOnFinalLayer='',cropCenter=True,\
                       regulOnNewLayer='l2',optimizer='SGD',opt_option=[0.01],\
                       epochs=20,nesterov=True,SGDmomentum=0.99,decay=0.0005,verbose=True,return_best_model=True)  
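# Sketch only (an addition, not the original code): the per-backbone settings used
# above can be tabulated, which shortens the MLP1/MLP2 transfer-learning comparison
# to a single loop. Keyword arguments follow the calls above; the function name and
# backbone_configs are hypothetical.
def Use_of_diff_final_model_loop(ReDo=False):
    backbone_configs = [
        ('ResNet50',    'activation_48', 'GlobalAveragePooling2D'),
        ('VGG',         'block5_pool',   'GlobalAveragePooling2D'),
        ('InceptionV1', 'avgpool',       ''),
    ]
    for target_dataset in ['Paintings', 'IconArt_v1']:
        for constrNet, features, transform in backbone_configs:
            for final_clf in ['MLP1', 'MLP2']:
                learn_and_eval(target_dataset, source_dataset='ImageNet',
                               final_clf=final_clf, features=features,
                               constrNet=constrNet, kind_method='TL',
                               gridSearch=False, ReDo=ReDo, cropCenter=True,
                               transformOnFinalLayer=transform, opt_option=[0.01],
                               verbose=True, return_best_model=True)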
Example #9
def perf_IconArt_ArtUK_RASTA_baseline_TL():
    
    print('RASTA')
    learn_and_eval('RASTA',source_dataset='ImageNet',final_clf='MLP1',features='block5_pool',\
                constrNet='VGG',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='imagenet',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)  
    learn_and_eval('RASTA',source_dataset='ImageNet',final_clf='MLP1',features='conv5_block3_out',\
                constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='imagenet',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)
    
    print('IconArt - VGG')
    # Diverges
    # learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='MLP1',features='block5_pool',\
    #             constrNet='VGG',kind_method='FT',gridSearch=False,ReDo=False,\
    #             pretrainingModif=False,return_best_model=True,weights='imagenet',\
    #             optimizer='SGD',opt_option=[0.01],
    #             transformOnFinalLayer='GlobalAveragePooling2D',
    #             epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)  
    learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='MLP1',features='block5_pool',\
                constrNet='VGG',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='imagenet',\
                optimizer='SGD',opt_option=[0.1,0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)  
    learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='MLP1',features='block5_pool',\
                constrNet='VGG',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_small01_modif_GAP',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)  
    learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='MLP1',features='block5_pool',\
                constrNet='VGG',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_big0001_modif_GAP_adam_unfreeze8_RandForUnfreezed_SmallDataAug_ep200',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)  
    learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='MLP1',features='block5_pool',\
                constrNet='VGG',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_big001_modif_GAP_RandInit_randomCrop_ep200_LRschedG',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)  
    
    
    print('IconArt - ResNet50')
    learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='MLP1',features='conv5_block3_out',\
                constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='imagenet',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)
    learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='MLP1',features='conv5_block3_out',\
                constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_small01_modif_GAP',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)
    learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='MLP1',features='conv5_block3_out',\
                constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_big0001_modif_GAP_adam_unfreeze20_RandForUnfreezed_SmallDataAug_ep200',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)
    learn_and_eval('IconArt_v1',source_dataset='ImageNet',final_clf='MLP1',features='conv5_block3_out',\
                constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_big001_modif_GAP_RandInit_randomCrop_ep200_LRschedG',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)
    
    print('Paintings - VGG')
    learn_and_eval('Paintings',source_dataset='ImageNet',final_clf='MLP1',features='block5_pool',\
                constrNet='VGG',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='imagenet',\
                optimizer='SGD',opt_option=[0.1,0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)  
    learn_and_eval('Paintings',source_dataset='ImageNet',final_clf='MLP1',features='block5_pool',\
                constrNet='VGG',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_small01_modif_GAP',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)  
    learn_and_eval('Paintings',source_dataset='ImageNet',final_clf='MLP1',features='block5_pool',\
                constrNet='VGG',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_big0001_modif_GAP_adam_unfreeze8_RandForUnfreezed_SmallDataAug_ep200',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)  
    learn_and_eval('Paintings',source_dataset='ImageNet',final_clf='MLP1',features='block5_pool',\
                constrNet='VGG',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_big001_modif_GAP_RandInit_randomCrop_ep200_LRschedG',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)  
    
    
    print('Paintings - ResNet50')
    learn_and_eval('Paintings',source_dataset='ImageNet',final_clf='MLP1',features='conv5_block3_out',\
                constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='imagenet',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)
    learn_and_eval('Paintings',source_dataset='ImageNet',final_clf='MLP1',features='conv5_block3_out',\
                constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_small01_modif_GAP',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)
    learn_and_eval('Paintings',source_dataset='ImageNet',final_clf='MLP1',features='conv5_block3_out',\
                constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_big0001_modif_GAP_adam_unfreeze20_RandForUnfreezed_SmallDataAug_ep200',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)
    learn_and_eval('Paintings',source_dataset='ImageNet',final_clf='MLP1',features='conv5_block3_out',\
                constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=False,\
                pretrainingModif=False,return_best_model=True,weights='RASTA_big001_modif_GAP_RandInit_randomCrop_ep200_LRschedG',\
                optimizer='SGD',opt_option=[0.01],
                transformOnFinalLayer='GlobalAveragePooling2D',
                epochs=20,cropCenter=True,verbose=True,SGDmomentum=0.9,decay=1e-4)
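The calls above differ only in the target dataset and in the pretrained `weights` checkpoint used to initialise the network. A minimal sketch of the ResNet50 part of that sweep written as a loop, assuming `learn_and_eval` is importable from the surrounding module and that the listed checkpoints exist on disk:

# Hedged sketch: the same ResNet50 sweep expressed as a loop over checkpoints.
# The checkpoint names are the ones used above; learn_and_eval is assumed to be
# importable from the surrounding module.
resnet_checkpoints = [
    'imagenet',
    'RASTA_small01_modif_GAP',
    'RASTA_big0001_modif_GAP_adam_unfreeze20_RandForUnfreezed_SmallDataAug_ep200',
    'RASTA_big001_modif_GAP_RandInit_randomCrop_ep200_LRschedG',
]
for target_dataset in ['IconArt_v1', 'Paintings']:
    for weights in resnet_checkpoints:
        learn_and_eval(target_dataset, source_dataset='ImageNet', final_clf='MLP1',
                       features='conv5_block3_out', constrNet='ResNet50',
                       kind_method='FT', gridSearch=False, ReDo=False,
                       pretrainingModif=False, return_best_model=True,
                       weights=weights, optimizer='SGD', opt_option=[0.01],
                       transformOnFinalLayer='GlobalAveragePooling2D',
                       epochs=20, cropCenter=True, verbose=True,
                       SGDmomentum=0.9, decay=1e-4)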
Example #10
def exp_BN_only():
    """In this exp we only fine-tuned the batch normalization of the models"""
    
    target_dataset_tab = ['Paintings','IconArt_v1']
    
    ## VGG: VGGAdaIn trains only the batch normalisation layers
    constrNet = 'VGGAdaIn'
    style_layers = ['block1_conv1','block1_conv2','block2_conv1','block2_conv2',
    'block3_conv1','block3_conv2','block3_conv3','block3_conv4',
    'block4_conv1','block4_conv2','block4_conv3','block4_conv4', 
    'block5_conv1','block5_conv2','block5_conv3','block5_conv4']
    opt_option_tab=[[0.1,0.01],[0.01]]
    optimizer='SGD'
    SGDmomentum=0.9
    decay=1e-4
    features = 'block5_pool'
    final_clf = 'MLP1'
    transformOnFinalLayer='GlobalAveragePooling2D'
    return_best_model = True
    epochs=20
    cropCenter = True
    getBeforeReLU = True
    
    regulOnNewLayer = None
    nesterov = False
    dropout = None
    onlyPlot = False
    freezingType = 'FromBottom'

    pretrainingModif = False
    
    for opt_option in opt_option_tab:
        print('opt_option',opt_option)
        for target_dataset in target_dataset_tab:
        #            print(constrNet,style_layers)
            metrics = learn_and_eval(target_dataset,constrNet=constrNet,kind_method='FT',\
                                    epochs=epochs,transformOnFinalLayer=transformOnFinalLayer,\
                                    forLatex=True,optimizer=optimizer,\
                                    pretrainingModif=pretrainingModif,freezingType=freezingType,\
                                    opt_option=opt_option,cropCenter=cropCenter,\
                                    style_layers=style_layers,getBeforeReLU=getBeforeReLU,\
                                    final_clf=final_clf,features=features,\
                                    return_best_model=return_best_model,\
                                    onlyReturnResult=onlyPlot,verbose=True,\
                                    dropout=dropout,regulOnNewLayer=regulOnNewLayer,\
                                    nesterov=nesterov,SGDmomentum=SGDmomentum,decay=decay)
            print(target_dataset,metrics)
                
    ## ResNet50 :
        
    constrNet = 'ResNet50AdaIn'
    getBeforeReLU = True
    #batch_size = 16 
    features = 'conv5_block3_out' 
    #style_layers = getBNlayersResNet50() # In fact this does not work, there is an issue:
    # getBNlayersResNet50 does not match the layers of the model and I do not know why
    style_layers = ['conv1_bn',
                    'conv2_block1_1_bn', 
                    'conv2_block1_2_bn', 
                    'conv2_block1_0_bn', 
                    'conv2_block1_3_bn', 
                    'conv2_block2_1_bn', 
                    'conv2_block2_2_bn', 
                    'conv2_block2_3_bn', 
                    'conv2_block3_1_bn', 
                    'conv2_block3_2_bn', 
                    'conv2_block3_3_bn', 
                    'conv3_block1_1_bn', 
                    'conv3_block1_2_bn', 
                    'conv3_block1_0_bn', 
                    'conv3_block1_3_bn', 
                    'conv3_block2_1_bn', 
                    'conv3_block2_2_bn', 
                    'conv3_block2_3_bn', 
                    'conv3_block3_1_bn', 
                    'conv3_block3_2_bn', 
                    'conv3_block3_3_bn', 
                    'conv3_block4_1_bn', 
                    'conv3_block4_2_bn', 
                    'conv3_block4_3_bn', 
                    'conv4_block1_1_bn', 
                    'conv4_block1_2_bn', 
                    'conv4_block1_0_bn', 
                    'conv4_block1_3_bn', 
                    'conv4_block2_1_bn', 
                    'conv4_block2_2_bn',  
                    'conv4_block2_3_bn',  
                    'conv4_block3_1_bn',  
                    'conv4_block3_2_bn',  
                    'conv4_block3_3_bn', 
                    'conv4_block4_1_bn', 
                    'conv4_block4_2_bn', 
                    'conv4_block4_3_bn', 
                    'conv4_block5_1_bn', 
                    'conv4_block5_2_bn', 
                    'conv4_block5_3_bn', 
                    'conv4_block6_1_bn', 
                    'conv4_block6_2_bn', 
                    'conv4_block6_3_bn',  
                    'conv5_block1_1_bn', 
                    'conv5_block1_2_bn', 
                    'conv5_block1_0_bn', 
                    'conv5_block1_3_bn',  
                    'conv5_block2_1_bn', 
                    'conv5_block2_2_bn',  
                    'conv5_block2_3_bn',
                    'conv5_block3_1_bn', 
                    'conv5_block3_2_bn',
                    'conv5_block3_3_bn']    
    for opt_option in opt_option_tab:
        print('opt_option',opt_option)
        for target_dataset in target_dataset_tab:
    #            print(constrNet,style_layers)
            metrics = learn_and_eval(target_dataset=target_dataset,constrNet=constrNet,\
                                     forLatex=True,verbose=True,\
                                     kind_method='FT',epochs=epochs,transformOnFinalLayer=transformOnFinalLayer,\
                                     pretrainingModif=pretrainingModif,freezingType=freezingType,\
                                     optimizer=optimizer,opt_option=opt_option, #batch_size=batch_size,\
                                     final_clf=final_clf,features=features,return_best_model=return_best_model,\
                                     onlyReturnResult=onlyPlot,style_layers=style_layers,
                                     cropCenter=cropCenter,dropout=dropout,regulOnNewLayer=regulOnNewLayer,\
                                     nesterov=nesterov,SGDmomentum=SGDmomentum,decay=decay)
            print(target_dataset,metrics)
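The `style_layers` list above is written out by hand because, as noted in the comment, `getBNlayersResNet50` does not match the layer names of the modified model. With the stock Keras ResNet50 the batch-normalisation layer names can be recovered programmatically; a hedged sketch (the ResNet50AdaIn graph may rename layers, so this is only a starting point, not the repository's method):

# Hedged sketch: list the BatchNormalization layer names of the stock Keras ResNet50.
# The modified ResNet50AdaIn graph used above may rename layers, which is presumably
# why the list is hard-coded in exp_BN_only.
import tensorflow as tf

def get_resnet50_bn_layer_names():
    model = tf.keras.applications.ResNet50(weights=None, include_top=False)
    return [layer.name for layer in model.layers
            if isinstance(layer, tf.keras.layers.BatchNormalization)]

# Example: compare with the hand-written style_layers list
# print(get_resnet50_bn_layer_names())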
Example #11
#     constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=True,\
#     transformOnFinalLayer='GlobalAveragePooling2D',cropCenter=True,\
#     regulOnNewLayer=None,optimizer='SGD',opt_option=[0.1,0.01],\
#     epochs=5,SGDmomentum=0.9,decay=1e-4,batch_size=16,pretrainingModif=True,\
#     suffix='testdebug',plotConv=True,verbose=True,clipnorm=10.)

# learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='MLP2',features='block5_pool',\
#     constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=False,\
#     transformOnFinalLayer='GlobalAveragePooling2D',cropCenter=True,\
#     regulOnNewLayer=None,optimizer='Padam',opt_option=[0.1],\
#     epochs=5,SGDmomentum=0.9,decay=1e-4,batch_size=16,pretrainingModif=True,\
#     suffix='testdebug',plotConv=True,verbose=True)

learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='MLP2',features='block5_pool',\
    constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=False,\
    transformOnFinalLayer='GlobalAveragePooling2D',cropCenter=True,\
    regulOnNewLayer=None,optimizer='Padam',opt_option=[0.1,0.1],\
    epochs=5,SGDmomentum=0.9,decay=1e-4,batch_size=16,pretrainingModif=True,\
    suffix='testdebug',plotConv=True,verbose=True)
# & 70.6 & 49.2 & 93.4 & 74.6 & 64.7 & 73.8 & 58.6 & 81.0 & 73.1 & 86.5 & 72.6 \\

learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='MLP2',features='block5_pool',\
    constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=False,\
    transformOnFinalLayer='GlobalAveragePooling2D',cropCenter=True,\
    regulOnNewLayer=None,optimizer='Padam',opt_option=[0.1,0.01],\
    epochs=5,SGDmomentum=0.9,decay=1e-4,batch_size=16,pretrainingModif=True,\
    suffix='testdebug',plotConv=True,verbose=True)
# & 61.2 & 45.2 & 92.4 & 72.1 & 55.2 & 68.7 & 54.6 & 79.1 & 63.9 & 83.7 & 67.6 \\
learn_and_eval(target_dataset,source_dataset='ImageNet',final_clf='MLP1',features='block5_pool',\
    constrNet='ResNet50',kind_method='FT',gridSearch=False,ReDo=False,\
    transformOnFinalLayer='GlobalAveragePooling2D',cropCenter=True,\
    regulOnNewLayer=None,optimizer='Padam',opt_option=[0.1,0.05],\
    epochs=5,SGDmomentum=0.9,decay=1e-4,batch_size=16,pretrainingModif=True,\
    suffix='testdebug',plotConv=True,verbose=True)

Example #12
def DeepDream_withFinedModel():
    """
    This function will load the two models (deep nets) before and after fine-tuning 
    and then compute the difference between the weights and finally run a 
    deep dream on the feature maps of the weights that have the most change
    """

    target_dataset = 'IconArt_v1'
    output_path = os.path.join(os.sep, 'media', 'gonthier', 'HDD2',
                               'output_exp', 'Covdata', 'DeepDream',
                               target_dataset)
    pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)

    matplotlib.use('Agg')  # To avoid having figures pop up during execution
    features = 'block5_pool'
    normalisation = False
    getBeforeReLU = False
    final_clf = 'LinearSVC'  # Does not matter here: overwritten below
    source_dataset = 'ImageNet'
    kind_method = 'FT'
    transformOnFinalLayer = 'GlobalAveragePooling2D'

    Model_dict = {}
    list_markers = ['o', 's', 'X', '*']
    alpha = 0.7

    dict_of_dict_hist = {}
    dict_of_dict = {}
    constrNet = 'VGG'

    weights = 'imagenet'

    if 'VGG' in constrNet:
        imagenet_model = tf.keras.applications.vgg19.VGG19(include_top=False,
                                                           weights=weights)
        net_layers = imagenet_model.layers
    else:
        raise NotImplementedError

    list_weights = []
    list_name_layers = []
    for original_layer in net_layers:
        # check for convolutional layer
        layer_name = original_layer.name
        if not ('conv' in layer_name):
            continue
        # get filter weights
        o_weights = original_layer.get_weights()  # o_filters, o_biases
        list_weights += [o_weights]
        list_name_layers += [layer_name]

    final_clf = 'MLP2'

    computeGlobalVariance = False
    optimizer = 'SGD'
    opt_option = [0.1, 0.001]
    return_best_model = True
    epochs = 20
    cropCenter = True
    SGDmomentum = 0.9
    decay = 1e-4

    returnStatistics = True
    net_finetuned = learn_and_eval(target_dataset,source_dataset,final_clf,features,\
                           constrNet,kind_method,style_layers=[],weights=weights,\
                           normalisation=normalisation,transformOnFinalLayer=transformOnFinalLayer,
                           ReDo=False,
                           returnStatistics=returnStatistics,cropCenter=cropCenter,\
                           optimizer=optimizer,opt_option=opt_option,epochs=epochs,\
                           SGDmomentum=SGDmomentum,decay=decay,return_best_model=return_best_model)
    finetuned_layers = net_finetuned.layers

    dict_layers_argsort = {}
    dict_layers_mean_squared = {}
    j = 0
    for finetuned_layer in finetuned_layers:
        # check for convolutional layer
        layer_name = finetuned_layer.name
        if not ('conv' in layer_name):
            continue
        # get filter weights
        if not (layer_name in list_name_layers):
            continue
        o_filters, o_biases = list_weights[j]
        j += 1
        f_filters, f_biases = finetuned_layer.get_weights()
        print(layer_name, f_filters.shape)
        num_filters = o_filters.shape[-1]
        # Norm 2 between the weights of the filters

        diff_filters = o_filters - f_filters
        norm2_filter = np.mean(o_filters**2, axis=(0, 1, 2))
        norm1_filter = np.mean(np.abs(o_filters), axis=(0, 1, 2))
        diff_squared = diff_filters**2
        diff_abs = np.abs(diff_filters)
        mean_squared = np.mean(diff_squared, axis=(0, 1, 2))
        mean_abs = np.mean(diff_abs, axis=(0, 1, 2))
        relative_diff_squared = mean_squared / norm2_filter
        relative_diff_abs = mean_abs / norm1_filter
        print('== For layer :', layer_name, ' ==')
        print('= Absolute squared of difference =')
        print_stats_on_diff(mean_squared)
        print('= Absolute abs of difference =')
        print_stats_on_diff(mean_abs)
        print('= Relative squared of difference =')
        print_stats_on_diff(relative_diff_squared)
        print('= Relative abs of difference =')
        print_stats_on_diff(relative_diff_abs)

        dict_layers_mean_squared[layer_name] = mean_squared
        argsort = np.argsort(mean_squared)[::-1]
        dict_layers_argsort[layer_name] = argsort

    K.set_learning_phase(0)
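`print_stats_on_diff` is a helper from the repository that is not shown in this snippet; the per-filter statistics it receives are computed as above. A self-contained sketch of that per-filter comparison on placeholder kernels (shapes and values are illustrative, not taken from the actual models):

# Hedged sketch: per-filter change between an original and a fine-tuned conv kernel.
# The kernels here are random placeholders; in DeepDream_withFinedModel they come from
# layer.get_weights() of the ImageNet model and of the fine-tuned model.
import numpy as np

def per_filter_relative_change(o_filters, f_filters):
    diff = o_filters - f_filters
    mean_squared = np.mean(diff**2, axis=(0, 1, 2))      # one value per output filter
    norm2 = np.mean(o_filters**2, axis=(0, 1, 2))
    relative = mean_squared / norm2
    order = np.argsort(mean_squared)[::-1]               # most-changed filters first
    return mean_squared, relative, order

rng = np.random.default_rng(0)
o = rng.normal(size=(3, 3, 64, 128)).astype(np.float32)  # (kh, kw, in, out)
f = o + 0.01 * rng.normal(size=o.shape).astype(np.float32)
mean_squared, relative, order = per_filter_relative_change(o, f)
print('Most changed filter index:', order[0], 'relative change:', relative[order[0]])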
Example #13
def saliencyMap_ImageSize():
    """
    Fonction pour tester sur une image la saliency map
    """
    
    target_dataset = 'IconArt_v1'
    style_layers = getBNlayersResNet50()
    features = 'activation_48'
    normalisation = False
    final_clf = 'LinearSVC' # Does not matter here: overwritten below
    source_dataset=  'ImageNet'
    transformOnFinalLayer='GlobalAveragePooling2D'

    final_clf = 'MLP2'
    epochs = 20
    optimizer = 'SGD'
    return_best_model = True
    batch_size= 16
    dropout=None
    regulOnNewLayer=None
    nesterov=False
    SGDmomentum=0.9
    decay=1e-4
    cropCenter = False
    # Load ResNet50 normalisation statistics
    
    opt_option = [0.1,0.01]
    pretrainingModif = True
    kind_method = 'FT'
    computeGlobalVariance = False
    constrNet = 'ResNet50'
    #list_bn_layers = getBNlayersResNet50()

    Model_dict = {}
    list_markers = ['o','s','X','*','v','^','<','>','d','1','2','3','4','8','h','H','p','d','$f$','P']
    alpha = 0.7
    sizeIm = 224
    Net = constrNet
    
    
    print('loading :',constrNet,computeGlobalVariance,kind_method,pretrainingModif,opt_option)         
    model = learn_and_eval(target_dataset,source_dataset,final_clf,features,\
                           constrNet,kind_method,style_layers=style_layers,
                           normalisation=normalisation,transformOnFinalLayer=transformOnFinalLayer,
                           batch_size_RF=16,epochs_RF=20,momentum=0.9,ReDo=False,
                           returnStatistics=True,cropCenter=cropCenter,\
                           computeGlobalVariance=computeGlobalVariance,\
                           epochs=epochs,optimizer=optimizer,opt_option=opt_option,
                           return_best_model=return_best_model,\
                           batch_size=batch_size,gridSearch=False,verbose=True)
    # Performance : & 54.4 & 76.3 & 60.7 & 82.1 & 74.3 & 70.6 & 11.0 & 61.3 \\ 
    
        
    item_name,path_to_img,default_path_imdb,classes,ext,num_classes,str_val,df_label,\
        path_data,Not_on_NicolasPC = get_database(target_dataset)
        
    images_in_set = df_label[df_label['set']=='test'][item_name].values
    
    images_in_set = [images_in_set[0]]
    for image in images_in_set:
        #image = images_in_set[20]
        image_path = os.path.join(path_to_img,image+'.jpg')           
        if cropCenter:
            image_array= load_and_crop_img(path=image_path,Net=Net,target_size=sizeIm,
                                    crop_size=sizeIm,interpolation='lanczos:center')
            # For VGG or ResNet with a classification head, the input size is 224
        else:
            image_array = load_resize_and_process_img(image_path,Net=Net,max_dim=sizeIm)
        
        predictions = model.predict(image_array)
        df_label[df_label[item_name]==image][classes].values
        c_i = 0
        SmoothGradsaliencyMap = SmoothedMask(model,c_i,stdev_spread=.15,\
                                   nsamples=25,magnitude=False)

        smooth_grad_of_image = SmoothGradsaliencyMap.GetMask(image_array)
        
        smooth_grad_of_image_scaled = take_abs_and_rescale(smooth_grad_of_image)
        
        smooth_grad_of_image_scaled = to_01(smooth_grad_of_image)
        ShowGrayscaleImage(smooth_grad_of_image_scaled[0,:,:,:])
        
        integrated_grad_of_image = GetMask_IntegratedGradients(image_array,model,c_i,
                                               x_steps=50)
        integrated_grad_of_image_scaled = to_01(integrated_grad_of_image) 
        ShowGrayscaleImage(integrated_grad_of_image_scaled[0,:,:,:])
        
        # In this case the gradient has the 3 color channels
        integrated_grad_randBaseline_of_image = GetMask_RandomBaseline_IntegratedGradients(image_array,model,c_i,
                                               x_steps=50,num_random_trials=10)
        integrated_grad_randBaseline_of_image_scaled = to_01(integrated_grad_randBaseline_of_image) 
        ShowGrayscaleImage(integrated_grad_randBaseline_of_image_scaled[0,:,:,:])
        
        integrated_grad_noisy_image = GetMask_IntegratedGradients_noisyImage(image_array,model,c_i,
                                               x_steps=50,num_random_trials=10,stdev_spread=.15)
        integrated_grad_noisy_image_scaled = to_01(integrated_grad_noisy_image) 
        ShowGrayscaleImage(integrated_grad_noisy_image_scaled[0,:,:,:])
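`SmoothedMask` and the `GetMask_*` helpers come from the repository's saliency utilities and their exact signatures are assumed here. For reference, SmoothGrad itself amounts to averaging the input gradient of the class score over noisy copies of the image; a minimal sketch for a generic Keras classifier (the function name and defaults are illustrative, not the repository's API):

# Hedged sketch of SmoothGrad for a Keras classifier: average the gradient of the
# score for class c_i over several noisy copies of the input image.
# `model` is assumed to map a (1, H, W, 3) float tensor to per-class scores.
import numpy as np
import tensorflow as tf

def smoothgrad(model, image_array, c_i, stdev_spread=0.15, nsamples=25):
    image = tf.convert_to_tensor(image_array, dtype=tf.float32)
    stdev = stdev_spread * (np.max(image_array) - np.min(image_array))
    total = tf.zeros_like(image)
    for _ in range(nsamples):
        noisy = image + tf.random.normal(tf.shape(image), stddev=stdev)
        with tf.GradientTape() as tape:
            tape.watch(noisy)
            score = model(noisy)[:, c_i]
        total += tape.gradient(score, noisy)
    return (total / nsamples).numpy()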
Example #14
def eval_MAP_SaliencyMethods(database='IconArt_v1',metamodel='FasterRCNN',demonet='res152_COCO',
                      k_per_bag=300,SaliencyMethod='SmoothGrad'):
    """
    The goal of this function is to compute the mAP of the saliency method for 
    classification ResNet 
    
    @param : SaliencyMethod : IntegratedGrad ou SmoothGrad pour le moment
    """
    matplotlib.use('Agg') 
    save_data = False
    
    ReDo = True
    plot = False    
    TEST_NMS = 0.01
    thresh_classif = 0.1
    
    # Parameter for the classification network 
    target_dataset = 'IconArt_v1'
    style_layers = []
    features = 'activation_48'
    normalisation = False
    final_clf = 'LinearSVC' # Does not matter here: overwritten below
    source_dataset=  'ImageNet'
    transformOnFinalLayer='GlobalAveragePooling2D'

    final_clf = 'MLP2'
    epochs = 20
    optimizer = 'SGD'
    return_best_model = True
    batch_size= 16
    dropout=None
    regulOnNewLayer=None
    nesterov=False
    SGDmomentum=0.9
    decay=1e-4
    cropCenter = False
    # Load ResNet50 normalisation statistics
    
    opt_option = [0.1,0.01]
    pretrainingModif = True
    kind_method = 'FT'
    computeGlobalVariance = False
    constrNet = 'ResNet50'

    sizeIm = 224
    Net = constrNet

    # Load the box proposals
    item_name,path_to_img,default_path_imdb,classes,ext,num_classes,str_val,df_label,path_data,Not_on_NicolasPC = get_database(database)
    imdb,list_im_withanno = get_imdb_and_listImagesInTestSet(database)
    num_images_detect = len(list_im_withanno)
    dict_rois = getDictBoxesProposals(database=target_dataset,k_per_bag=k_per_bag,\
                                      metamodel=metamodel,demonet=demonet)
        
    for_data_output = os.path.join(path_data,'dataSaliencyMap',SaliencyMethod)
    im_with_boxes_output = os.path.join(path_data,'SaliencyMapImagesBoxes',SaliencyMethod)
    print('===',im_with_boxes_output)
    pathlib.Path(for_data_output).mkdir(parents=True, exist_ok=True)
    pathlib.Path(im_with_boxes_output).mkdir(parents=True, exist_ok=True)
    
    # Load Classification model 
    print('loading :',constrNet,computeGlobalVariance,kind_method,pretrainingModif,opt_option)         
    model = learn_and_eval(target_dataset,source_dataset,final_clf,features,\
                           constrNet,kind_method,style_layers=style_layers,
                           normalisation=normalisation,transformOnFinalLayer=transformOnFinalLayer,
                           batch_size_RF=16,epochs_RF=20,momentum=0.9,ReDo=False,
                           returnStatistics=True,cropCenter=cropCenter,\
                           computeGlobalVariance=computeGlobalVariance,\
                           epochs=epochs,optimizer=optimizer,opt_option=opt_option,
                           return_best_model=return_best_model,\
                           batch_size=batch_size,gridSearch=False,verbose=True)
        
    SaliencyMapClass_tab = []
    stdev_spread = 0.1
    nsamples = 50
    x_steps = 50
    
    for j in range(num_classes):
        SaliencyMapClass=getSaliencyMapClass(model,c_i=j,method=SaliencyMethod,\
                                         stdev_spread=stdev_spread,nsamples=nsamples,x_steps=x_steps)    
        SaliencyMapClass_tab +=[SaliencyMapClass]
        
#    list_gt_boxes_classes = []
    candidate_boxes = [[] for _ in range(imdb.num_images)]
    all_boxes_order = [[[] for _ in range(num_images_detect)] for _ in range(imdb.num_classes)]
#    number_gt_boxes = 0
    itera = 20
    norm = True
    t0 = time.time()
    # A bit more than 1440 images
    for i in range(imdb.num_images):
#        complet_name_tab = ('.'.join(complet_name.split('.')[0:-1])).split('/')
        im_path = imdb.image_path_at(i)
        name_im = im_path.split('/')[-1]
        if i%itera==0:
            t1 = time.time()
            print(i,name_im,'duration for ',itera,'iterations = ',str(t1-t0),'s')
            t0 = time.time()
        im = cv2.imread(im_path)
        hauteur, largeur ,_ = im.shape
        blobs, im_scales = get_blobs(im)
        
        if database=='PeopleArt':
            name_im = '/'.join(im_path.split('/')[-2:])
            name_im = '.'.join(name_im.split('.')[0:-1])
        else:
            name_im = name_im.split('.')[0]
        proposals_boxes = dict_rois[name_im]
        
        if cropCenter:
            image_array= load_and_crop_img(path=im_path,Net=Net,target_size=sizeIm,
                                    crop_size=sizeIm,interpolation='lanczos:center')
            # For VGG or ResNet with a classification head, the input size is 224
        else:
            image_array = load_resize_and_process_img(im_path,Net=Net,max_dim=sizeIm)
        
        #print(np.max(image_array),np.min(image_array),np.mean(image_array),np.median(image_array))
        #input('wait')
        
        dict_sensitivity = {}
        dict_sensitivity_path = os.path.join(for_data_output,name_im+'_dict_SaliencyMap'+SaliencyMethod+'_std'+str(stdev_spread)+'_n'+str(nsamples)+'_steps'+str(x_steps)+'.pkl')
        if not(os.path.exists(dict_sensitivity_path)) or ReDo:
            predictions = model.predict(image_array)[0]
            dict_sensitivity['predictions'] = predictions
            inds = np.where(predictions > thresh_classif)[0]
            for ind in inds:
                prediction = predictions[ind]
                if np.isnan(prediction):
                    print('Prediction of ',name_im,'is nan !!!')
                    input('wait')
                candidate_boxes = []
                j = ind +1  # the class index for the evaluation part
                Smap=SaliencyMapClass_tab[ind].GetMask(image_array)
                #print('before normalisation',np.max(Smap),np.min(Smap),np.mean(Smap),np.median(Smap))
                if save_data: dict_sensitivity[j] = Smap
                
                if SaliencyMethod=='SmoothGrad':
                    #Smap_grey = np.mean(Smap,axis=-1,keepdims=True)
                    Smap_grey = np.mean(np.abs(Smap),axis=-1,keepdims=True)
                    #print('after grey',np.max(Smap_grey),np.min(Smap_grey),np.mean(Smap_grey),np.median(Smap_grey))
                    if norm:
                        Smap_grey = to_01(Smap_grey)
                    #print('after normalisation',np.max(Smap_grey),np.min(Smap_grey),np.mean(Smap_grey),np.median(Smap_grey))
                    
                    Smap_grey_time_score = prediction*Smap_grey
                    
                else: # In the case of Integrated Gradient
                    
                    # Following advice from Antoine Pirovano
                    ptile= 99
                    # Sum for grayscale of the absolute value
                    pixel_attrs = np.sum(np.abs(Smap), axis=-1,keepdims=True)
                    pixel_attrs = np.clip(pixel_attrs / np.percentile(pixel_attrs, ptile), 0, 1)
                    
                    Smap_grey_time_score = prediction * pixel_attrs
                    
                #print('after mul score',np.max(Smap_grey_time_score),np.min(Smap_grey_time_score),np.mean(Smap_grey_time_score),np.median(Smap_grey_time_score))
                # Caution, counter-intuitive: cv2.resize expects the target size as
                # (width, height) whereas im.shape returns (height, width, channels)
                Smap_grey_time_score = Smap_grey_time_score[0]
                #Smap_grey_time_score_resized =  cv2.resize(Smap_grey_time_score, (hauteur, largeur),interpolation=cv2.INTER_NEAREST) 
                Smap_grey_time_score_resized =  cv2.resize(Smap_grey_time_score, (largeur,hauteur),interpolation=cv2.INTER_NEAREST) 
                #print('Smap_grey_time_score_resized',Smap_grey_time_score_resized.shape,im.shape)
                #print('after resize',np.max(Smap_grey_time_score_resized),np.min(Smap_grey_time_score_resized),np.mean(Smap_grey_time_score_resized),np.median(Smap_grey_time_score_resized))
                
                if plot:
                    name_output = name_im+'_'+SaliencyMethod+'_std'+str(stdev_spread)+'_n'+str(nsamples)+'_steps'+str(x_steps)+ '_'+str(j)+'.jpg'
                    name_output_path = os.path.join(im_with_boxes_output,name_output)
                    Smap_grey_time_score_resized_01 = to_01(Smap_grey_time_score_resized)
                    plt.imshow(Smap_grey_time_score_resized_01, cmap=cm.gray)
                    plt.title(classes[j-1]+' : '+str(prediction))
                    plt.savefig(name_output_path)
                    plt.close()
                
                for k in range(len(proposals_boxes)):
                    box = proposals_boxes[k]
                    x1,y1,x2,y2 = box # x is along the width, y along the height
                    x1_int = int(np.round(x1))
                    x2_int = int(np.round(x2))
                    y1_int = int(np.round(y1))
                    y2_int = int(np.round(y2))
                    #print(name_im,'Smap_grey_time_score_resized',Smap_grey_time_score_resized.shape,im.shape)
                    #print(x1_int,x2_int,y1_int,y2_int)
                    assert(x2_int<=largeur)
                    assert(y2_int<=hauteur)
                    Smap_grey_time_score_resized_crop = Smap_grey_time_score_resized[y1_int:y2_int,x1_int:x2_int]
                    
                    # because bbox = dets[i, :4] : boxes are stored as x1, y1, x2, y2, score
                    Smap_grey_time_score_resized_crop_score = np.mean(Smap_grey_time_score_resized_crop)
                    # if k < 3:
                    #     print('Smap_grey_time_score_resized_crop',Smap_grey_time_score_resized_crop.shape)
                    #     print(x1_int,x2_int,y1_int,y2_int)
                    #     print('j',j,'k',k,',score',Smap_grey_time_score_resized_crop_score)
                    if not(np.isnan(Smap_grey_time_score_resized_crop_score)):
                        box_with_scores = np.append(box,[Smap_grey_time_score_resized_crop_score])
                        candidate_boxes += [box_with_scores]
                    else:
                        box_with_scores = np.append(box,[0.0])
                        candidate_boxes += [box_with_scores]
                        
                    # if np.isnan(Smap_grey_time_score_resized_crop_score):
                    #     print('!!! score is nan')
                    #     print(x1,y1,x2,y2)
                    #     print(x1_int,x2_int,y1_int,y2_int)
                    #     print(Smap_grey_time_score_resized_crop.shape)
                    #     print(name_im,'Smap_grey_time_score_resized',Smap_grey_time_score_resized.shape,im.shape)
                    #     print(prediction)
                    #     print('after resize',np.max(Smap_grey_time_score_resized),np.min(Smap_grey_time_score_resized),np.mean(Smap_grey_time_score_resized),np.median(Smap_grey_time_score_resized))
                    #     print(Smap_grey_time_score_resized_crop_score)
                    #     input('wait')
                    
                #print(candidate_boxes)
                if len(candidate_boxes)>0:
                    candidate_boxes_NP = np.array(candidate_boxes)
                    
                    candidate_boxes_NP[:,-1] = candidate_boxes_NP[:,-1] -np.max(candidate_boxes_NP[:,-1]) + prediction 
                    keep = nms(candidate_boxes_NP, TEST_NMS)
                    cls_dets = candidate_boxes_NP[keep, :]
                    all_boxes_order[j][i]  = cls_dets
                
            if plot:
                roi_boxes_and_score = []
                local_cls = []
                for j in range(num_classes):
                    cls_dets = all_boxes_order[j+1][i] 
                    if len(cls_dets) > 0:
                        local_cls += [classes[j]]
                        roi_boxes_score = cls_dets
                        if roi_boxes_and_score is None:
                            roi_boxes_and_score = [roi_boxes_score]
                        else:
                            roi_boxes_and_score += [roi_boxes_score] 
                if roi_boxes_and_score is None: roi_boxes_and_score = [[]]
                #print(name_im,roi_boxes_and_score,local_cls)
                vis_detections_list(im, local_cls, roi_boxes_and_score, thresh=-np.inf)
                name_output = name_im+'_'+SaliencyMethod+'_std'+str(stdev_spread)+'_n'+str(nsamples)+'_steps'+str(x_steps)+ '_Regions.jpg'
                name_output_path = os.path.join(im_with_boxes_output,name_output)
                #input("wait")
                plt.savefig(name_output_path)
                plt.close()
            
            if save_data:
                with open(dict_sensitivity_path, 'wb') as f:
                    pickle.dump(dict_sensitivity, f, pickle.HIGHEST_PROTOCOL)
            
    # for i in range(imdb.num_images):     
    #     candidate_boxes[i] = np.array(candidate_boxes[i])
    
    imdb.set_force_dont_use_07_metric(True)
    det_file = os.path.join(path_data, 'detectionsSaliencyMap'+SaliencyMethod+'_std'+str(stdev_spread)+'_n'+str(nsamples)+'_steps'+str(x_steps)+'.pkl')
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes_order, f, pickle.HIGHEST_PROTOCOL)
    output_dir = path_data +'tmp/' + database+'_'+SaliencyMethod+'_std'+str(stdev_spread)+'_n'+str(nsamples)+'_steps'+str(x_steps)+'_mAP.txt'
    aps = imdb.evaluate_detections(all_boxes_order, output_dir) # AP at IoU 0.5
    print("===> Detection score (thres = 0.5): ",database,'with Saliency map from',SaliencyMethod,'with std =',stdev_spread,'nsamples = ',nsamples,'x_steps =',x_steps)
    print(arrayToLatex(aps,per=True))
    ovthresh_tab = [0.3,0.1,0.]
    for ovthresh in ovthresh_tab:
        aps = imdb.evaluate_localisation_ovthresh(all_boxes_order, output_dir,ovthresh)
        print("Detection score with thres at ",ovthresh)
        print(arrayToLatex(aps,per=True))
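The core of the localisation step above is scoring every box proposal by the mean saliency it covers, then rescaling the scores and applying NMS per class. A compact, self-contained sketch of that scoring, with a random map and hypothetical boxes standing in for the real proposals:

# Hedged sketch: score box proposals by the mean of a (H, W) saliency map inside
# each box, as done per class in eval_MAP_SaliencyMethods before nms() is applied.
import numpy as np

def score_proposals(saliency_map, proposals, class_score):
    h, w = saliency_map.shape
    scored = []
    for x1, y1, x2, y2 in proposals:
        x1_i, y1_i = int(round(x1)), int(round(y1))
        x2_i, y2_i = min(int(round(x2)), w), min(int(round(y2)), h)
        crop = saliency_map[y1_i:y2_i, x1_i:x2_i]
        score = float(np.mean(crop)) if crop.size else 0.0
        scored.append([x1, y1, x2, y2, score])
    scored = np.array(scored)
    # Shift scores so the best box gets exactly the classification score,
    # mirroring the rescaling applied before nms() above.
    scored[:, -1] = scored[:, -1] - np.max(scored[:, -1]) + class_score
    return scored

smap = np.random.rand(300, 400)            # (height, width)
boxes = np.array([[10, 20, 100, 150], [50, 60, 200, 250]], dtype=float)
print(score_proposals(smap, boxes, class_score=0.8))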