import gzip
import os
import pickle

import numpy as np
import theano
import theano.tensor as T
import lasagne

# Project-local modules used below; the exact module names are assumptions
# inferred from usage (dataset loaders, network definitions, batch helpers).
import ldb
import CNN
import util


def Load_weights(Pretrained_net, Arch, dataset_type, BN):
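    """Build the `Arch` model graph, load pretrained weights from
    `Pretrained_net` (a pickled dict with a 'params' entry), and return:
      net  - compiled Theano functions: loss, accuracy, logits, softmax
             prediction, and gradients w.r.t. the input;
      data - the dataset splits plus the mean pixel and clip bounds.
    """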
    X_val = None
    y_val = None
    if dataset_type=='cifar10':
        print('load cifar10 dataset')
        normaliz_input = False

        size_images = 32
        num_channel = 3
        num_classes = 10
        X_train, y_train, X_test, y_test,  meanpixel = ldb.load_cifar_dataset()
        print('min of train image before normalization ' + str(np.min(X_train)))
        print('min of mean pixel ' + str(np.min(meanpixel)))

        # center the data by subtracting the per-pixel training mean
        X_train -= meanpixel
        X_test -= meanpixel

    elif dataset_type == 'mnist':
        print('load mnist dataset')
        normaliz_input = False

        size_images = 28
        num_channel = 1
        num_classes = 10
        X_train, y_train, X_test, y_test = ldb.load_MNIST_dataset()
        meanpixel = np.zeros(X_train[0].shape)
    elif dataset_type == 'SVHN':
        print('load SVHN (Street View House Numbers) dataset')
        normaliz_input = True
        size_images = 32
        num_channel = 3
        num_classes = 10

        X_train, y_train, X_val, y_val, X_test, y_test, EigenVectors, EigenValues = ldb.load_SVNH_dataset()
        meanpixel = np.zeros(X_train[0].shape)
    elif dataset_type == 'cifar100':
        print('load cifar100 dataset')

        normaliz_input = False
        size_images = 32
        num_channel = 3
        num_classes = 100
        X_train, y_train, X_test, y_test, meanpixel = ldb.load_cifar100_dataset(Normal_flag=True)
        print('number of training samples ' + str(len(y_train)))
    else:
        raise ValueError('unknown dataset_type: ' + str(dataset_type))

    clip_min = np.min(X_train)
    clip_max = np.max(X_train)
    print('clip min ' + str(clip_min))
    print('clip max ' + str(clip_max))

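    # Theano symbolic inputs: a float32 4-D image batch and an int label vector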
    input_var = T.tensor4('inputs', dtype='float32')
    target_var = T.ivector('targets')

    if Arch == 'NiN':
        # create CNN
        num_filter = [192, 160, 96, 192, 192, 192, 192, 192, num_classes]
        print('select NiN structure')
        # NOTE: unlike the other branches, this call returns only the network,
        # so logit_layer stays undefined for the NiN architecture.
        network = CNN.NiN(input_var=input_var, num_chan=num_channel, width=size_images, num_fill=num_filter)
    elif Arch == 'cuda_conv':
        num_filter = [32, 32, 64]
        print('Batch Normalization ' + str(BN))
        network, logit_layer = CNN.Cuda_Conv_18(input_var=input_var, num_chan=num_channel, width=size_images,
                                                num_fill=num_filter, FCparams=0, num_outputs=num_classes,
                                                flag=False, normaliz_input=normaliz_input, BN=BN)
    elif Arch == 'resnet':
        n = 18
        network, logit_layer = CNN.ResNet_FullPreActivation(input_var=input_var, PIXELS=size_images,
                                                            num_outputs=num_classes, n=n)
    elif Arch == 'VGG':
        network, logit_layer = CNN.VGG2(type='VGG16', input_var=input_var, num_chan=num_channel,
                                        width=size_images, num_outputs=num_classes)
    else:
        raise ValueError('unknown architecture: ' + str(Arch))


    # get the softmax output
    test_prediction = lasagne.layers.get_output(network, deterministic=True)

    # get the logit vector of the net (the output before softmax activation function)
    logits = lasagne.layers.get_output(logit_layer, deterministic=True)

    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction, target_var)
    test_loss = test_loss.mean(axis=0)
    # per-sample correctness: 1 where the argmax prediction matches the target
    test_acc = T.eq(T.argmax(test_prediction, axis=1), target_var)

    # Theano functions for loss and logits output
    logits_fn = theano.function([input_var], [logits], allow_input_downcast=True)
    predict_fn = theano.function([input_var], [test_prediction], allow_input_downcast=True)
    val_fn_loss = theano.function([input_var, target_var], [test_loss], allow_input_downcast=True)
    val_fn_acc = theano.function([input_var, target_var], [test_acc], allow_input_downcast=True)

    # gradient of the loss with respect to the input (used by the fast gradient sign attack)
    grad_loss = theano.grad(test_loss, input_var)
    fun_grad_loss = theano.function([input_var, target_var], grad_loss, allow_input_downcast=True)

    # gradient of each class logit with respect to the input, used by the DeepFool attack
    dfdx, scan_updates = theano.scan(lambda j, logits, input_var: T.grad(logits[0, j], input_var),
                                     sequences=T.arange(num_classes),
                                     non_sequences=[logits, input_var])
    fun_grad_classifier = theano.function([input_var], [dfdx])
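    # dfdx stacks one gradient per class, giving shape (num_classes,) + input
    # batch shape; DeepFool uses these per-class gradients to linearize the
    # decision boundaries around the input.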


    print('load pretrained weights from ' + str(Pretrained_net))
    # load the trained weights of the CNN from a (possibly gzip-compressed) .pkl file
    try:
        net = pickle.load(open(Pretrained_net, 'rb'))
    except Exception:
        net = pickle.load(gzip.open(Pretrained_net, 'rb'))


    # the loaded object is a dictionary; net['params'] holds the trained weights
    all_param = net['params']
    lasagne.layers.set_all_param_values(network, all_param)

    # compute accuracy on the training and test sets, and save the indices of
    # correctly classified samples (adversaries are generated only for those)
    all_acc = []
    for batch in util.iterate_minibatches(X_train, y_train, 100, shuffle=False):
        inputs, targets, indices = batch
        all_acc.append(val_fn_acc(inputs,targets))
    all_acc = np.asarray(np.hstack(all_acc))
    indx_corrc = np.where(all_acc[0]==1)
    print('accuracy on training ' + str(np.mean(all_acc[0])))
    pickle.dump(indx_corrc, open(dataset_type + '_Index_correctlyClassifiedTrain_' + Arch + '.pkl', 'wb'))

    all_acc = []
    for batch in util.iterate_minibatches(X_test, y_test, 100, shuffle=False):
        inputs, targets, indices = batch
        all_acc.append(val_fn_acc(inputs,targets))
    all_acc = np.asarray(np.hstack(all_acc))
    indx_corrc = np.where(all_acc[0]==1)
    counting = np.asarray([len(np.where(y_test[indx_corrc] == i)[0]) for i in range(num_classes)])
    print('accuracy on test ' + str(np.mean(all_acc[0])))
    pickle.dump(indx_corrc, open(dataset_type + '_Index_correctlyClassifiedTest_' + Arch + '.pkl', 'wb'))

    net = {'net_name': Arch, 'loss': val_fn_loss, 'acc': val_fn_acc, 'logit': logits_fn,
           'predict': predict_fn, 'gradLoss': fun_grad_loss, 'gradClassifier': fun_grad_classifier}
    data = {'X_train': X_train, 'y_train': y_train, 'X_test': X_test, 'y_test': y_test,
            'meanPixel': meanpixel, 'clp_min': clip_min, 'clp_max': clip_max}
    return net, data
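
    # Example usage (a minimal sketch; the file name and argument values are
    # illustrative assumptions, not taken from this module):
    #   net, data = Load_weights('cifar10_resnet.pkl', Arch='resnet',
    #                            dataset_type='cifar10', BN=True)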
    # Method of the attack class (its definition is not shown in this section);
    # it relies on self.data/self.net_info as returned by Load_weights, plus
    # self.indx, self.epsilon, self.dataset_type, and self.Arch.
    def Generate_Attack(self, targeted=False):
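        """Generate adversarial examples with an iterative fast gradient sign
        method (FGSM): repeatedly add epsilon * sign(dLoss/dx) to the image
        (or subtract it when moving toward a target class) until the network
        predicts a different, non-dustbin class or the step budget runs out.
        """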

        X = self.data['X_train'][self.indx]
        Y = self.data['y_train'][self.indx]
        meanpixel = self.data['meanPixel']
        clp_max = self.data['clp_max']
        clp_min = self.data['clp_min']
        epsilon = self.epsilon
        dataset_type = self.dataset_type
        indx = self.indx
        print(np.unique(Y))
        # index of the extra 'dustbin' (reject) class: one past the real class labels
        dustbin = len(np.unique(Y))
        fun_grad = self.net_info['gradLoss']
        p_softmax = self.net_info['predict']
        output_x = []
        output_y = []
        distortion = []
        outputVals = []
        Indx = []
        not_found = 0
        print('clp_min ' + str(clp_min))
        print('clp_max ' + str(clp_max))
        Avg_iter = 0         # total steps spent on successful attacks
        total_iteration = 3  # maximum FGSM steps per image
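        # attack each image independently: accumulate signed-gradient steps until
        # the prediction flips to a non-dustbin class or the step budget runs out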
        for idx_init_img in range(len(X)):

            itc = 1
            pert = 0
            prtb_x, prtb_y = X[idx_init_img:idx_init_img + 1].copy(), Y[idx_init_img:idx_init_img + 1].copy()
            orig_x = X[idx_init_img:idx_init_img + 1].copy()

            # targeted mode aims at the least-likely (lowest-softmax) real class
            target_y = np.argmin(p_softmax(orig_x)[0][:, :dustbin], axis=1).astype('uint8')
            print('==========================')

            while itc <= total_iteration:
                num_classes = p_softmax(prtb_x)[0].shape[1]
                if targeted:
                    eta = epsilon * np.sign(fun_grad(prtb_x, target_y))
                else:
                    eta = epsilon * np.sign(fun_grad(prtb_x, prtb_y))

                pert = pert + eta

                # keep the accumulated perturbation inside an L-infinity ball of
                # radius alpha (bounds are only defined for mnist and cifar10)
                if dataset_type == 'mnist':
                    alpha = 0.3  # MNIST
                elif dataset_type == 'cifar10':
                    alpha = 0.05  # cifar10
                pert = np.clip(pert, -alpha, alpha)

                # targeted: descend the target-class loss; untargeted: ascend the true-class loss
                if targeted:
                    prtb_x = orig_x - pert
                else:
                    prtb_x = orig_x + pert
                fooled_y = np.uint8(np.argmax(p_softmax(np.clip(prtb_x, clp_min, clp_max))[0], axis=1))

                # success: the prediction changed and did not land in the dustbin class
                if (prtb_y != fooled_y) and fooled_y != dustbin:

                    print('final fooled target ' + str(fooled_y))
                    print('true label ' + str(prtb_y))
                    print('iteration: ' + str(itc))
                    prtb_x = np.clip(prtb_x, clp_min, clp_max)
                    # root-mean-square distortion between the adversarial and original image
                    Ec_dist = np.sqrt(np.mean((prtb_x[0] - orig_x[0]) ** 2))
                    print('magnitude of distortion ' + str(Ec_dist))
                    distortion.append(Ec_dist)
                    outputVals.append(p_softmax(prtb_x)[0])
                    # print('Probability of adversarial'+str(np.max(p_softmax(prtb_x)[0], axis=1)))
                    output_x.append(prtb_x[0])
                    output_y.append(prtb_y)
                    Indx.append(indx[idx_init_img])
                    estimated_prediction = np.vstack(np.asarray(outputVals, dtype='float32'))
                    if np.mod(idx_init_img, 20) == 0:
                        print('***********img ' + str(idx_init_img) + '************')
                        print("not found =" + str(not_found))
                        print("Avg distortion {:.4f}, Avg confidence {:.4f} ".format(np.mean(distortion), np.mean(
                            np.max(estimated_prediction, axis=1), axis=0)))
                        print('***********************')
                    Avg_iter += itc

                    break
                itc += 1
            if itc > total_iteration:
                not_found += 1
                print(str(idx_init_img) + ' Not found****')

        print('=============== average iterations per successful attack: ' + str(Avg_iter / float(len(outputVals))))
        data_fooled = list(zip(output_x, outputVals, output_y, distortion, Indx))
        # np.save(dataset_type+'_AdvTest_' + str(len(X))+'_espi_'+str(epsilon)+'_FastSign', output_x)

        out_name = (dataset_type + '_' + str(num_classes) + '_AdvSamples_' + str(len(X)) + '_espi_' + str(epsilon)
                    + '_itc_' + str(total_iteration) + self.Arch + '_Targeted=' + str(targeted) + '.pkl')
        pickle.dump({'data': data_fooled, 'meanpixel': meanpixel, 'Avg_iter': Avg_iter / float(len(outputVals))},
                    open(out_name, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)

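        # sanity check: accuracy of the network on the generated adversaries
        # (against the true labels) should be close to zero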
        val_fn_acc = self.net_info['acc']
        all_acc = []
        for batch in util.iterate_minibatches(np.asarray(output_x), np.asarray(output_y).flatten(), 100,
                                              shuffle=False):
            inputs, targets, indices = batch
            all_acc.append(val_fn_acc(inputs, targets))
        all_acc = np.asarray(np.hstack(all_acc))
        print('accuracy on adversaries ' + str(np.mean(all_acc[0])))