def main():
    _, path = sys.argv
    datasets = ["GTSRB", "mnist", "rockpaperscissors", "sign-language"]
    for dataset in datasets:
        print("")
        print("=====")
        print(dataset)
        data_path = f"{path}/{dataset}"
        data = get_data(dataset)
        gc.collect()
        df_data = {"accuracy": [], "file_name": []}
        for file_name in os.listdir(f"{data_path}/adv_models"):
            model_path = f"{data_path}/adv_models/{file_name}"
            print(model_path)
            model = load_model(model_path,
                               custom_objects={
                                   'fn': loss,
                                   'tf': tf,
                                   'atan': tf.math.atan
                               })
            _, accuracy = model.evaluate(data.test_data,
                                         data.test_labels,
                                         verbose=0)
            df_data["accuracy"].append(accuracy)
            df_data["file_name"].append(file_name)
        df = pd.DataFrame(df_data)
        df.to_csv(f"{data_path}/adv_model_natural_accuracy.csv", index=False)
Example #2
    def __init__(self,
                 restore=None,
                 session=None,
                 use_softmax=False,
                 use_brelu=False,
                 activation="relu"):
        def bounded_relu(x):
            return K.relu(x, max_value=1)

        if use_brelu:
            activation = bounded_relu

        print("inside MNISTModel: activation = {}".format(activation))

        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model = load_model(restore)
        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.model = model
        self.layer_outputs = layer_outputs
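A hedged usage sketch of the layer_outputs list stored above (the model file name and the all-zeros batch are illustrative placeholders, not part of the original): each stored K.function maps a batch of images to one Conv2D/Dense layer's activations.

import numpy as np

m = MNISTModel(restore="mnist_cnn.h5")                  # assumed model path
x_batch = np.zeros((1, 28, 28, 1), dtype=np.float32)    # dummy MNIST-shaped batch
activations = [f([x_batch])[0] for f in m.layer_outputs]
for act in activations:
    print(act.shape)                                    # one entry per Conv2D/Dense layer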
Example #3
def _load_keras_model(model_b64):
    import tempfile
    import base64

    # Write the base64-encoded model bytes to a temporary HDF5 file.
    fd, path = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'wb') as tmp:
            tmp.write(base64.b64decode(model_b64.encode('utf-8')))

        keras_model = load_model(path, compile=False)

        # Recover the optimizer from the saved training config, if present.
        opened_new_file = not isinstance(path, h5py.File)
        if opened_new_file:
            f = h5py.File(path, mode='r')
        else:
            f = path
        training_config = f.attrs.get('training_config')
        if training_config is None:
            optimizer_cls = tf.keras.optimizers.Adam()
        else:
            training_config = json.loads(training_config.decode('utf-8'))
            optimizer_config = training_config['optimizer_config']
            optimizer_cls = tf.keras.optimizers.deserialize(optimizer_config)

        if opened_new_file:
            f.close()

        _, W = keras_model.inputs[0].get_shape()
        add_loss(keras_model, int(W))
        keras_model.compile(optimizer=optimizer_cls)
    finally:
        os.remove(path)

    return keras_model
Example #4
def LoadModel():
    global D
    try:
        D = load_model('./tdoa1.h5')
    except (IOError, OSError):
        # No saved model found; build and compile a fresh one.
        D = None
    if D is None:
        i = Input(shape=(M, 4))
        print("1=====", i)
        a = Flatten()(i)
        # Fully-connected ReLU stack, widest layers first.
        for units in [200, 160, 120] + [80] * 3 + [60] * 8 + [40] * 6 + [20, 10]:
            a = Dense(units, activation='relu')(a)
        o = Dense(2, activation='tanh')(a)
        D = Model(inputs=i, outputs=o)
        D.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
Example #5
def create_submission():
    """
    create submission
    """
    model = load_model(MODEL_NAME + '_model.h5')

    n_test_images = 40670
    pred_arr = np.zeros((n_test_images, 5))  # one column per count in the submission
    for k in range(0, n_test_images):
        image_path = os.path.join(PLANET_KAGGLE_TEST_JPEG_DIR,
                                  'test_' + str(k) + '.jpg')
        print(image_path)

        img = cv2.imread(image_path)
        img = img[None, ...]
        pred = model.predict(img)
        pred = pred.astype(int)

        pred_arr[k, :] = pred

    print('pred_arr.shape', pred_arr.shape)
    pred_arr = pred_arr.clip(min=0)
    df_submission = pd.DataFrame()
    df_submission['test_id'] = range(0, n_test_images)
    df_submission['adult_males'] = pred_arr[:, 0]
    df_submission['subadult_males'] = pred_arr[:, 1]
    df_submission['adult_females'] = pred_arr[:, 2]
    df_submission['juveniles'] = pred_arr[:, 3]
    df_submission['pups'] = pred_arr[:, 4]
    df_submission.to_csv(MODEL_NAME + '_submission.csv', index=False)
Example #6
def hsja_attack(file_name,
                norm,
                sess,
                num_image=10,
                data_set_class=MNIST(),
                targeted=False):
    print("hsja attack", flush=True)
    np.random.seed(1215)
    tf.set_random_seed(1215)
    random.seed(1215)

    data = data_set_class

    model = load_model(file_name,
                       custom_objects={
                           'fn': loss,
                           'tf': tf,
                           'ResidualStart': ResidualStart,
                           'ResidualStart2': ResidualStart2,
                           'atan': tf.math.atan
                       })
    inputs, targets, true_labels, true_ids, img_info = generate_data(
        data,
        samples=num_image,
        targeted=True,
        random_and_least_likely=True,
        target_type=0b0001,
        predictor=model.predict,
        start=0)
    if len(inputs) == 0:
        return 0, 0

    if targeted:
        target_examples = get_target_examples(data, model, targets)
        targets = np.argmax(targets, axis=1)
    else:
        target_examples = [None for i in range(len(targets))]
        targets = [None for i in range(len(targets))]

    if norm == "2":
        constraint = "l2"
        norm_fn = lambda x: np.sum(x**2, axis=(1, 2, 3))
    elif norm == "i":
        constraint = "linf"
        norm_fn = lambda x: np.max(np.abs(x), axis=(1, 2, 3))
    else:
        raise ValueError("norm must be '2' (L2) or 'i' (Linf)")

    start_time = timer.time()
    perturbed_input, original_input = attack_multiple(inputs, targets,
                                                      target_examples,
                                                      constraint, model, data)
    UB = np.average(norm_fn(perturbed_input - original_input))
    print("Done calculating robustness. The average robustness was:", UB)
    time_spent = (timer.time() - start_time) / len(original_input)
    return UB, time_spent
Example #7
def get_accuracy(file_name, sess, epsilon, num_steps, step_size, data=MNIST()):
    model = load_model(file_name, custom_objects={'fn': loss, 'tf': tf, 'atan': tf.math.atan})
    start_time = time.time()
    adversaries = PGD(model, sess, epsilon, num_steps, step_size, data)
    predictions = model.predict(adversaries)
    accuracy = np.mean(np.equal(np.argmax(predictions, 1), np.argmax(data.test_labels, 1)))
    print(f"The accuracy was {accuracy}", flush=True)
    time_used = time.time() - start_time

    return accuracy, time_used
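A hedged usage sketch (the model path and the PGD hyper-parameters are assumptions, not taken from the original): get_accuracy needs a live TensorFlow session because the PGD attack builds graph ops.

with tf.Session() as sess:
    acc, seconds = get_accuracy("models/mnist_cnn.h5", sess,   # assumed path
                                epsilon=0.1, num_steps=40, step_size=0.01)
    print("robust accuracy: {:.4f} ({:.1f}s)".format(acc, seconds))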
Example #8
def get_weights_biases(file_name):
    model = load_model(file_name, custom_objects={'fn': fn})
    temp_weights = [layer.get_weights() for layer in model.layers]
    weights = []
    biases = []
    for i in range(len(temp_weights)):
        # In these models Dense layers alternate with activation layers,
        # so only every other layer carries weights and biases.
        if i % 2 != 0:
            W = temp_weights[i][0].T
            weights.append(W)
            biases.append(temp_weights[i][1])
    return weights, biases
Example #9
def cw_attack(file_name, norm, sess, num_image=10, data_set_class=MNIST()):
    print("cw attack", flush=True)
    np.random.seed(1215)
    tf.set_random_seed(1215)
    random.seed(1215)

    data = data_set_class

    model = load_model(file_name,
                       custom_objects={
                           'fn': loss,
                           'tf': tf,
                           'ResidualStart': ResidualStart,
                           'ResidualStart2': ResidualStart2,
                           'atan': tf.math.atan
                       })
    inputs, targets, true_labels, true_ids, img_info = generate_data(
        data,
        samples=num_image,
        targeted=True,
        random_and_least_likely=True,
        target_type=0b0001,
        predictor=model.predict,
        start=0)
    if len(inputs) == 0:
        return 0, 0

    model.predict = model
    model.image_size = data.test_data.shape[1]
    model.num_channels = data.test_data.shape[3]
    model.num_labels = data.test_labels.shape[1]

    if norm == '1':
        attack = EADL1(sess, model, max_iterations=10000)
        norm_fn = lambda x: np.sum(np.abs(x), axis=(1, 2, 3))
    elif norm == '2':
        attack = CarliniL2(sess, model, max_iterations=10000)
        norm_fn = lambda x: np.sum(x**2, axis=(1, 2, 3))
    elif norm == 'i':
        attack = CarliniLi(sess, model, max_iterations=1000)
        norm_fn = lambda x: np.max(np.abs(x), axis=(1, 2, 3))
    else:
        raise ValueError("norm must be '1', '2' or 'i'")

    start_time = timer.time()
    perturbed_input = attack.attack(inputs, targets)
    UB = np.average(norm_fn(perturbed_input - inputs))
    return UB, (timer.time() - start_time) / len(inputs)
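For reference, a tiny numeric check of the norm helpers used above (values chosen purely for illustration). Note that the '2' branch reports the squared L2 norm of each perturbation rather than the L2 norm itself.

import numpy as np

delta = np.array([[[[0.5], [-0.25]], [[0.0], [0.25]]]])  # one 2x2x1 perturbation
print(np.sum(np.abs(delta), axis=(1, 2, 3)))   # L1 norm     -> [1.]
print(np.sum(delta**2, axis=(1, 2, 3)))        # squared L2  -> [0.375]
print(np.max(np.abs(delta), axis=(1, 2, 3)))   # Linf norm   -> [0.5]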
Example #10
def predict(out_path, txt, top=1):
    import pickle
    import os
    if os.path.isfile(os.path.join(out_path, 'token_enc.pkl')):
        with open(os.path.join(out_path, 'token_enc.pkl'), 'rb') as f:
            tokenizer, seq_len, language = pickle.load(f)

        #do preprocessing bit
        from nltk.corpus import stopwords
        from nltk.stem.snowball import SnowballStemmer
        stopwords = stopwords.words(language)
        stemmer = SnowballStemmer(language)
        import re
        r = re.compile(r'[\W]', re.U)
        txt = r.sub(' ', txt)
        txt = re.sub('[\\s]+', ' ', txt)
        txt = [
            ' '.join(
                stemmer.stem(w.lower()) for w in txt.split()
                if w not in stopwords)
        ]

        #convert text to sequence
        txt_seq = tokenizer.texts_to_sequences(txt)
        from tensorflow.contrib.keras.api.keras.preprocessing import sequence
        txt_seq = sequence.pad_sequences(txt_seq, maxlen=seq_len)

        #load NN model and predict
        from tensorflow.contrib.keras.api.keras.models import load_model
        model = load_model(os.path.join(out_path, 'CNN1d.h5'))
        output = model.predict(txt_seq)

        #create binary sequences for top x predictions
        sorted_idx = (-output).argsort()
        import numpy as np
        label = np.zeros((top, len(output[0])))
        for i in range(0, top):
            label[i][sorted_idx[0][i]] = 1

        #convert to txt labels
        with open(os.path.join(out_path, 'label_enc.pkl'), 'rb') as f:
            label_decoder = pickle.load(f)
        return label_decoder.inverse_transform(label)
    else:
        return "Invalid output path!"
Example #11
def modify_backprop(model, name):
    g = tf.get_default_graph()
    with g.gradient_override_map({'Relu': name}):

        # get layers that have an activation
        layer_dict = [
            layer for layer in model.layers[1:]
            if hasattr(layer, 'activation')
        ]

        # replace relu activation
        for layer in layer_dict:
            if layer.activation == tf.keras.activations.relu:
                layer.activation = tf.nn.relu

        # re-instantiate a new model
        new_model = load_model('cifarClassification.h5')
    return new_model
Example #12
def cw_attack(file_name, norm, sess, num_image=10, cifar = False, tinyimagenet = False):
    np.random.seed(1215)
    tf.set_random_seed(1215)
    random.seed(1215)
    if norm == '1':
        attack = EADL1
        norm_fn = lambda x: np.sum(np.abs(x),axis=(1,2,3))
    elif norm == '2':
        attack = CarliniL2
        norm_fn = lambda x: np.sum(x**2,axis=(1,2,3))
    elif norm == 'i':
        attack = CarliniLi
        norm_fn = lambda x: np.max(np.abs(x),axis=(1,2,3))
    else:
        raise ValueError("norm must be '1', '2' or 'i'")

    if cifar:
        data = CIFAR()
    elif tinyimagenet:
        data = tinyImagenet()
    else:
        data = MNIST()
    model = load_model(file_name,
                       custom_objects={
                           'fn': loss,
                           'tf': tf,
                           'ResidualStart': ResidualStart,
                           'ResidualStart2': ResidualStart2
                       })
    inputs, targets, true_labels, true_ids, img_info = generate_data(
        data,
        samples=num_image,
        targeted=True,
        random_and_least_likely=True,
        target_type=0b0010,
        predictor=model.predict,
        start=0)
    model.predict = model
    model.num_labels = 10
    if cifar:
        model.image_size = 32
        model.num_channels = 3
    elif tinyimagenet:
        model.image_size = 64
        model.num_channels = 3
        model.num_labels = 200
    else:
        model.image_size = 28
        model.num_channels = 1
        
    
    start_time = timer.time()
    attack = attack(sess, model, max_iterations = 1000)
    perturbed_input = attack.attack(inputs, targets)
    UB = np.average(norm_fn(perturbed_input-inputs))
    return UB, (timer.time()-start_time)/len(inputs)
Example #13
def main(model_name, data):

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    config.log_device_placement = False
    sess = tf.Session(config=config)
    with sess.as_default():
        # cw_attack(model_name, "i", sess, num_image=10, data_set_class=data)

        def loss(correct, predicted):
            return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                           logits=predicted)

        images = []
        true_labels = []
        targets = []
        i = 0
        while len(images) < 5:
            image = data.test_data[i]
            label = np.argmax(data.test_labels[i])
            num_classes = len(data.test_labels[0])
            if label not in true_labels:
                images.append(image)
                true_labels.append(label)
                targets.append((label + 1) % num_classes)
            i += 1

        model = load_model(model_name, custom_objects={'fn': loss, 'tf': tf})

        model.image_size = data.test_data.shape[1]
        model.num_channels = data.test_data.shape[3]
        model.num_labels = data.test_labels.shape[1]
        model.predict = model

        images = np.array(images)
        targets = np.eye(model.num_labels)[targets]

        attack_0 = CarliniL0(sess, model, max_iterations=1000)
        attack_1 = EADL1(sess, model, max_iterations=1000)
        attack_2 = CarliniL2(sess, model, max_iterations=1000)
        attack_inf = CarliniLi(sess, model, max_iterations=1000)

        perturbed_0 = attack_0.attack(images, targets)
        perturbed_1 = attack_1.attack(images, targets)
        perturbed_2 = attack_2.attack(images, targets)
        perturbed_inf = attack_inf.attack(images, targets)

    fig = plt.figure(figsize=(10, 10))

    def plot_images(x_position, images, sets):
        images = images + 0.5
        images = images / np.max(images)
        for i in range(len(images)):
            ax = fig.add_subplot(5, sets, x_position + i * sets)
            plt.imshow(images[i], cmap='gray')
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)

    plot_images(1, np.reshape(images, [-1, 28, 28]), 5)
    plot_images(2, np.reshape(perturbed_0, [-1, 28, 28]), 5)
    plot_images(3, np.reshape(perturbed_1, [-1, 28, 28]), 5)
    plot_images(4, np.reshape(perturbed_2, [-1, 28, 28]), 5)
    plot_images(5, np.reshape(perturbed_inf, [-1, 28, 28]), 5)
    plt.show()
Example #14
from tensorflow.contrib.keras.api.keras.models import load_model
from cifar_data_load import GetTestDataByLabel
import numpy as np

if __name__ == '__main__':
    # Load the trained model
    model = load_model("lenet-no-activation-model.h5")
    # Get the test-set images and labels
    X = GetTestDataByLabel('data')
    Y = GetTestDataByLabel('labels')
    # Count the number of correctly classified images
    print(np.sum(np.equal(Y, np.argmax(model.predict(X), 1))))
Example #15
def run(file_name, n_samples, p_n, q_n, activation = 'relu', cifar=False, tinyimagenet=False):
    np.random.seed(1215)
    tf.set_random_seed(1215)
    random.seed(1215)
    keras_model = load_model(file_name, custom_objects={'fn':fn, 'tf':tf})
    if tinyimagenet:
        model = CNNModel(keras_model, inp_shape = (64,64,3))
    elif cifar:
        model = CNNModel(keras_model, inp_shape = (32,32,3))
    else:
        model = CNNModel(keras_model)

    #Set correct linear_bounds function
    global linear_bounds
    if activation == 'relu':
        linear_bounds = relu_linear_bounds
    elif activation == 'ada':
        linear_bounds = ada_linear_bounds
    elif activation == 'sigmoid':
        linear_bounds = sigmoid_linear_bounds
    elif activation == 'tanh':
        linear_bounds = tanh_linear_bounds
    elif activation == 'arctan':
        linear_bounds = atan_linear_bounds
    upper_bound_conv.recompile()
    lower_bound_conv.recompile()
    compute_bounds.recompile()

    if cifar:
        inputs, targets, true_labels, true_ids, img_info = generate_data(CIFAR(), samples=n_samples, targeted=True, random_and_least_likely = True, target_type = 0b0010, predictor=model.model.predict, start=0)
    elif tinyimagenet:
        inputs, targets, true_labels, true_ids, img_info = generate_data(tinyImagenet(), samples=n_samples, targeted=True, random_and_least_likely = True, target_type = 0b0010, predictor=model.model.predict, start=0)
    else:
        inputs, targets, true_labels, true_ids, img_info = generate_data(MNIST(), samples=n_samples, targeted=True, random_and_least_likely = True, target_type = 0b0010, predictor=model.model.predict, start=0)
    #0b01111 <- all
    #0b0010 <- random
    #0b0001 <- top2
    #0b0100 <- least

    steps = 15
    eps_0 = 0.05
    summation = 0
    warmup(model, inputs[0].astype(np.float32), eps_0, p_n, find_output_bounds)
        
    start_time = time.time()
    for i in range(len(inputs)):
        print('--- CNN-Cert: Computing eps for input image ' + str(i)+ '---')
        predict_label = np.argmax(true_labels[i])
        target_label = np.argmax(targets[i])
        weights = model.weights[:-1]
        biases = model.biases[:-1]
        shapes = model.shapes[:-1]
        W, b, s = model.weights[-1], model.biases[-1], model.shapes[-1]
        last_weight = (W[predict_label,:,:,:]-W[target_label,:,:,:]).reshape([1]+list(W.shape[1:]))
        weights.append(last_weight)
        biases.append(np.asarray([b[predict_label]-b[target_label]]))
        shapes.append((1,1,1))

        #Perform binary search
        log_eps = np.log(eps_0)
        log_eps_min = -np.inf
        log_eps_max = np.inf
        for j in range(steps):
            LB, UB = find_output_bounds(weights, biases, shapes, model.pads, model.strides, inputs[i].astype(np.float32), np.exp(log_eps), p_n)
            print("Step {}, eps = {:.5f}, {:.6s} <= f_c - f_t <= {:.6s}".format(j,np.exp(log_eps),str(np.squeeze(LB)),str(np.squeeze(UB))))
            if LB > 0: #Increase eps
                log_eps_min = log_eps
                log_eps = np.minimum(log_eps+1, (log_eps_max+log_eps_min)/2)
            else: #Decrease eps
                log_eps_max = log_eps
                log_eps = np.maximum(log_eps-1, (log_eps_max+log_eps_min)/2)
        
        if p_n == 105:
            str_p_n = 'i'
        else:
            str_p_n = str(p_n)
        
        print("[L1] method = CNN-Cert-{}, model = {}, image no = {}, true_id = {}, target_label = {}, true_label = {}, norm = {}, robustness = {:.5f}".format(activation,file_name, i, true_ids[i],target_label,predict_label,str_p_n,np.exp(log_eps_min)))
        summation += np.exp(log_eps_min)
    K.clear_session()
    
    eps_avg = summation/len(inputs)
    total_time = (time.time()-start_time)/len(inputs)
    print("[L0] method = CNN-Cert-{}, model = {}, total images = {}, norm = {}, avg robustness = {:.5f}, avg runtime = {:.2f}".format(activation,file_name,len(inputs),str_p_n,eps_avg,total_time))
    return eps_avg, total_time
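A hedged invocation sketch (the model path and argument values are assumptions; in this code p_n = 105 is the convention for the L-infinity norm):

eps_avg, avg_time = run("models/mnist_cnn.h5", n_samples=10, p_n=2, q_n=2)  # assumed arguments
print(eps_avg, avg_time)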
Example #16
from tensorflow.contrib.keras.api.keras.preprocessing import image
from tensorflow.contrib.keras import backend
from tensorflow.contrib.keras.api.keras.models import load_model
import numpy as np
import os

script_dir = os.path.dirname(__file__)
# Load pre-trained model
model_backup_path = os.path.join(script_dir, '../dataset/cat_or_dogs_model.h5')
test_set_path = os.path.join(script_dir, '../dataset/single_prediction')

classifier = load_model(model_backup_path)

input_size = (128, 128)

test_images_path = [
    test_set_path + '/' + filename for filename in os.listdir(test_set_path)
]
test_images = np.array([
    image.img_to_array(image.load_img(test_image_name, target_size=input_size))
    for test_image_name in test_images_path
])

# No need to rescale the images here... why?

predictions = classifier.predict(test_images)

for prediction, image_path in zip(predictions, test_images_path):
    if prediction == 1:
        prediction = 'dog'
    else:
        prediction = 'cat'
    print(image_path, '->', prediction)
Example #17
EMBEDDING_PATH = [path_to_embeddings + 'glove.6B.{}d.txt'.format(embedding_dims),
                  path_to_embeddings + 'glove.twitter.27B.{}d.txt'.format(embedding_dims)]
MODEL_PATH = [path_to_models + '@_glove{}d_cnn_2layer_3_3'.format(embedding_dims),
              path_to_models + '@_glovetwitter{}d_cnn_2layer_3_3'.format(embedding_dims)]

for (dataset, maxlen) in zip(DATASETS, MAXLEN):
    for (embedding_prefix, embedding_path, model_path) in zip(EMBEDDING_PREFIX, EMBEDDING_PATH, MODEL_PATH):
        
        # Replace special character for dataset
        model_path = model_path.replace('@', dataset)
        # Start measuring time
        start_time = time.time()  

        # 1. load the neural network
        print("[logger]: Loading Neural Network model from {}".format(model_path))
        model = load_model(model_path)

        # 2. load IMDB dataset
        print("[logger]: Loading {} dataset with maxlen={}, emb_dims={}".format(dataset, maxlen, embedding_dims))
        k = min(1000, n_sims)  # this should be larger than the number of experiments
        if dataset == 'imdb':
            (_,_), (X_test, y_test) = load_IMDB_dataset(embedding_path, embedding_dims, maxlen, num_samples=-1, return_text=False)
            (_,_), (x_text, _) = load_IMDB_dataset(embedding_path, embedding_dims, maxlen, num_samples=-1, return_text=True)
        elif dataset == 'sst':
            (_,_), (X_test, y_test) = load_SST_dataset(embedding_path, embedding_dims, maxlen, num_samples=-1, return_text=False)
            (_,_), (x_text, _) = load_SST_dataset(embedding_path, embedding_dims, maxlen, num_samples=-1, return_text=True) 
        elif dataset == 'qa':
            (_,_), (X_test, y_test) = load_QA_dataset(embedding_path, embedding_dims, maxlen, num_samples=-1, return_text=False)
            (_,_), (x_text, _) = load_QA_dataset(embedding_path, embedding_dims, maxlen, num_samples=-1, return_text=True) 
        elif dataset == 'ag':
            (_,_), (X_test, y_test) = load_AG_dataset(embedding_path, embedding_dims, maxlen, num_samples=-1, return_text=False)
Example #18
    def load_model(self,
                   dataset="mnist",
                   model_name="2-layer",
                   activation="relu",
                   model=None,
                   batch_size=0,
                   compute_slope=False,
                   order=1):
        """
        model: if set to None, then load dataset with model_name. Otherwise use the model directly.
        dataset: mnist, cifar and imagenet. recommend to use mnist and cifar as a starting point.
        model_name: possible options are 2-layer, distilled, and normal
        """
        from setup_cifar import CIFAR, CIFARModel, TwoLayerCIFARModel
        from setup_mnist import MNIST, MNISTModel, TwoLayerMNISTModel
        from setup_tinyimagenet import tinyImagenet

        # if set this to true, we will use the logit layer output instead of probability
        # the logit layer's gradients are usually larger and more stable
        output_logits = True
        self.dataset = dataset
        self.model_name = model_name

        if model is None:
            print('Loading model...')
            if dataset == "mnist":
                self.batch_size = 1024

                model = CNNModel(load_model(model_name,
                                            custom_objects={
                                                'fn': fn,
                                                'ResidualStart': ResidualStart,
                                                'ResidualStart2':
                                                ResidualStart2,
                                                'tf': tf
                                            }),
                                 inp_shape=(28, 28, 1))
            elif dataset == "cifar":
                self.batch_size = 1024

                model = CNNModel(load_model(model_name,
                                            custom_objects={
                                                'fn': fn,
                                                'ResidualStart': ResidualStart,
                                                'ResidualStart2':
                                                ResidualStart2,
                                                'tf': tf
                                            }),
                                 inp_shape=(32, 32, 3))
            elif dataset == "tinyimagenet":
                self.batch_size = 32

                model = CNNModel(load_model(model_name,
                                            custom_objects={
                                                'fn': fn,
                                                'ResidualStart': ResidualStart,
                                                'ResidualStart2':
                                                ResidualStart2,
                                                'tf': tf
                                            }),
                                 inp_shape=(64, 64, 3))
            else:
                raise RuntimeError("dataset unknown")

        #print("*** Loaded model successfully")

        self.model = model
        self.compute_slope = compute_slope
        if batch_size != 0:
            self.batch_size = batch_size

        ## placeholders: self.img, self.true_label, self.target_label
        # img is the placeholder for image input
        self.img = tf.placeholder(shape=[
            None, model.image_size, model.image_size, model.num_channels
        ],
                                  dtype=tf.float32)
        # output is the output tensor of the entire network
        self.output = model.predict(self.img)
        # create the graph to compute gradient
        # get the desired true label and target label
        self.true_label = tf.placeholder(dtype=tf.int32, shape=[])
        self.target_label = tf.placeholder(dtype=tf.int32, shape=[])
        true_output = self.output[:, self.true_label]
        target_output = self.output[:, self.target_label]
        # get the difference
        self.objective = true_output - target_output
        # get the gradient(deprecated arguments)
        self.grad_op = tf.gradients(self.objective, self.img)[0]
        # compute gradient norm: (in computation graph, so is faster)
        grad_op_rs = tf.reshape(self.grad_op, (tf.shape(self.grad_op)[0], -1))
        self.grad_2_norm_op = tf.norm(grad_op_rs, axis=1)
        self.grad_1_norm_op = tf.norm(grad_op_rs, ord=1, axis=1)
        self.grad_inf_norm_op = tf.norm(grad_op_rs, ord=np.inf, axis=1)

        ### Lily: added Hessian-vector product calculation here for 2nd order bound:
        if order == 2:
            ## _hessian_vector_product(ys, xs, v): returns a list of tensors containing the product between the Hessian and v
            ## ys: a scalar value or a tensor or a list of tensors to be summed to yield a scalar
            ## xs: a list of tensors that we should construct the Hessian over
            ## v: a list of tensors with the same shape as xs that we want to multiply by the Hessian
            # self.randv: shape = (Nimg,28,28,1) (the v in _hessian_vector_product)
            self.randv = tf.placeholder(shape=[
                None, model.image_size, model.image_size, model.num_channels
            ],
                                        dtype=tf.float32)
            # hv_op_tmp: shape = (Nimg,28,28,1) for mnist, same as self.img (the xs in _hessian_vector_product)
            hv_op_tmp = gradients_impl._hessian_vector_product(
                self.objective, [self.img], [self.randv])[0]
            # hv_op_rs: reshape hv_op_tmp to hv_op_rs whose shape = (Nimg, 784) for mnist
            hv_op_rs = tf.reshape(hv_op_tmp, (tf.shape(hv_op_tmp)[0], -1))
            # self.hv_norm_op: norm of hessian vector product, keep shape = (Nimg,1) using keepdims
            self.hv_norm_op = tf.norm(hv_op_rs, axis=1, keepdims=True)
            # hv_op_rs_normalize: normalize Hv to Hv/||Hv||, shape = (Nimg, 784)
            hv_op_rs_normalize = hv_op_rs / self.hv_norm_op
            # self.hv_op: reshape hv_op_rs_normalize to shape = (Nimg,28,28,1)
            self.hv_op = tf.reshape(hv_op_rs_normalize, tf.shape(hv_op_tmp))

            ## reshape randv and compute its norm
            # shape: (Nimg, 784)
            randv_rs = tf.reshape(self.randv, (tf.shape(self.randv)[0], -1))
            # shape: (Nimg,)
            self.randv_norm_op = tf.norm(randv_rs, axis=1)
            ## compute v'Hv: use un-normalized Hv (hv_op_tmp, hv_op_rs)
            # element-wise multiplication and then sum over axis = 1 (now shape: (Nimg,))
            self.vhv_op = tf.reduce_sum(tf.multiply(randv_rs, hv_op_rs),
                                        axis=1)
            ## compute Rayleigh quotient: v'Hv/v'v (estimated largest eigenvalue), shape: (Nimg,)
            # note: self.vhv_op and self.randv_norm_op have to be of the same dimension (either (Nimg,) or (Nimg,1))
            self.eig_est = self.vhv_op / tf.square(self.randv_norm_op)

            ## Lily added the tf.while to compute the eigenvalue in computational graph later
            # cond for computing largest abs/neg eigen-value
            def cond(it, randv, eig_est, eig_est_prev, tfconst):
                norm_diff = tf.norm(eig_est - eig_est_prev, axis=0)
                return tf.logical_and(it < 500, norm_diff > 0.001)

            # compute largest abs eigenvalue: tfconst = 0
            # compute largest neg eigenvalue: tfconst = 10
            def body(it, randv, eig_est, eig_est_prev, tfconst):
                #hv_op_tmp = gradients_impl._hessian_vector_product(self.objective, [self.img], [randv])[0]-10*randv
                hv_op_tmp = gradients_impl._hessian_vector_product(
                    self.objective, [self.img], [randv])[0] - tf.multiply(
                        tfconst, randv)
                hv_op_rs = tf.reshape(hv_op_tmp, (tf.shape(hv_op_tmp)[0], -1))
                hv_norm_op = tf.norm(hv_op_rs, axis=1, keepdims=True)
                hv_op_rs_normalize = hv_op_rs / hv_norm_op
                hv_op = tf.reshape(hv_op_rs_normalize, tf.shape(hv_op_tmp))

                randv_rs = tf.reshape(randv, (tf.shape(randv)[0], -1))
                randv_norm_op = tf.norm(randv_rs, axis=1)
                vhv_op = tf.reduce_sum(tf.multiply(randv_rs, hv_op_rs), axis=1)
                eig_est_prev = eig_est
                eig_est = vhv_op / tf.square(randv_norm_op)

                return (it + 1, hv_op, eig_est, eig_est_prev, tfconst)

            it = tf.constant(0)
            # compute largest abs eigenvalue
            result = tf.while_loop(
                cond, body,
                [it, self.randv, self.vhv_op, self.eig_est,
                 tf.constant(0.0)])
            # compute largest neg eigenvalue
            self.shiftconst = tf.placeholder(shape=(), dtype=tf.float32)
            result_1 = tf.while_loop(
                cond, body,
                [it, self.randv, self.vhv_op, self.eig_est, self.shiftconst])

            # computing largest abs eig value and save result
            self.it = result[0]
            self.while_hv_op = result[1]
            self.while_eig = result[2]

            # computing largest neg eig value and save result
            self.it_1 = result_1[0]
            #self.while_eig_1 = tf.add(result_1[2], tfconst)
            self.while_eig_1 = tf.add(result_1[2], result_1[4])

            show_tensor_op = False
            if show_tensor_op:
                print("====================")
                print("Define hessian_vector_product operator: ")
                print("hv_op_tmp = {}".format(hv_op_tmp))
                print("hv_op_rs = {}".format(hv_op_rs))
                print("self.hv_norm_op = {}".format(self.hv_norm_op))
                print("hv_op_rs_normalize = {}".format(hv_op_rs_normalize))
                print("self.hv_op = {}".format(self.hv_op))
                print("self.grad_op = {}".format(self.grad_op))
                print("randv_rs = {}".format(randv_rs))
                print("self.randv_norm_op = {}".format(self.randv_norm_op))
                print("self.vhv_op = {}".format(self.vhv_op))
                print("self.eig_est = {}".format(self.eig_est))
                print("====================")

        return self.img, self.output, self.model
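The tf.while_loop above is a power iteration that tracks the Rayleigh quotient v'Hv / v'v. A minimal NumPy sketch of the same idea, with an explicit symmetric matrix H standing in for the Hessian-vector product (all names and values here are illustrative):

import numpy as np

def largest_magnitude_eig(H, iters=500, tol=1e-3):
    # Power iteration: apply H, renormalise, and stop once the
    # Rayleigh-quotient estimate stops changing.
    v = np.random.randn(H.shape[0])
    eig_prev = np.inf
    for _ in range(iters):
        Hv = H @ v
        eig = v @ Hv / (v @ v)
        if abs(eig - eig_prev) <= tol:
            break
        eig_prev = eig
        v = Hv / np.linalg.norm(Hv)
    return eig

H = np.array([[2.0, 1.0], [1.0, -3.0]])
print(largest_magnitude_eig(H))  # approaches the eigenvalue of largest magnitude (about -3.19)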
Example #19
def main_fun(args, ctx):
    import numpy
    import os
    import tensorflow as tf
    import tensorflow.contrib.keras as keras
    from tensorflow.contrib.keras.api.keras import backend as K
    from tensorflow.contrib.keras.api.keras.models import Sequential, load_model, save_model
    from tensorflow.contrib.keras.api.keras.layers import Dense, Dropout
    from tensorflow.contrib.keras.api.keras.optimizers import RMSprop
    from tensorflow.contrib.keras.python.keras.callbacks import LambdaCallback, TensorBoard

    from tensorflow.python.saved_model import builder as saved_model_builder
    from tensorflow.python.saved_model import tag_constants
    from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def

    from tensorflowonspark import TFNode

    cluster, server = TFNode.start_cluster_server(ctx)

    if ctx.job_name == "ps":
        server.join()
    elif ctx.job_name == "worker":

        def generate_rdd_data(tf_feed, batch_size):
            print("generate_rdd_data invoked")
            while True:
                batch = tf_feed.next_batch(batch_size)
                imgs = []
                lbls = []
                for item in batch:
                    imgs.append(item[0])
                    lbls.append(item[1])
                images = numpy.array(imgs).astype('float32') / 255
                labels = numpy.array(lbls).astype('float32')
                yield (images, labels)

        with tf.device(
                tf.train.replica_device_setter(
                    worker_device="/job:worker/task:%d" % ctx.task_index,
                    cluster=cluster)):

            IMAGE_PIXELS = 28
            batch_size = 100
            num_classes = 10

            # the data, shuffled and split between train and test sets
            if args.input_mode == 'tf':
                from tensorflow.contrib.keras.api.keras.datasets import mnist
                (x_train, y_train), (x_test, y_test) = mnist.load_data()
                x_train = x_train.reshape(60000, 784)
                x_test = x_test.reshape(10000, 784)
                x_train = x_train.astype('float32') / 255
                x_test = x_test.astype('float32') / 255

                # convert class vectors to binary class matrices
                y_train = keras.utils.to_categorical(y_train, num_classes)
                y_test = keras.utils.to_categorical(y_test, num_classes)
            else:  # args.input_mode == 'spark'
                x_train = tf.placeholder(tf.float32,
                                         [None, IMAGE_PIXELS * IMAGE_PIXELS],
                                         name="x_train")
                y_train = tf.placeholder(tf.float32, [None, 10],
                                         name="y_train")

            model = Sequential()
            model.add(Dense(512, activation='relu', input_shape=(784, )))
            model.add(Dropout(0.2))
            model.add(Dense(512, activation='relu'))
            model.add(Dropout(0.2))
            model.add(Dense(10, activation='softmax'))

            model.summary()

            model.compile(loss='categorical_crossentropy',
                          optimizer=RMSprop(),
                          metrics=['accuracy'])

        saver = tf.train.Saver()

        with tf.Session(server.target) as sess:
            K.set_session(sess)

            def save_checkpoint(epoch, logs=None):
                if epoch == 1:
                    tf.train.write_graph(sess.graph.as_graph_def(),
                                         args.model_dir, 'graph.pbtxt')
                saver.save(sess,
                           os.path.join(args.model_dir, 'model.ckpt'),
                           global_step=epoch * args.steps_per_epoch)

            ckpt_callback = LambdaCallback(on_epoch_end=save_checkpoint)
            tb_callback = TensorBoard(log_dir=args.model_dir,
                                      histogram_freq=1,
                                      write_graph=True,
                                      write_images=True)

            # add callbacks to save model checkpoint and tensorboard events (on worker:0 only)
            callbacks = [ckpt_callback, tb_callback
                         ] if ctx.task_index == 0 else None

            if args.input_mode == 'tf':
                # train & validate on in-memory data
                history = model.fit(x_train,
                                    y_train,
                                    batch_size=batch_size,
                                    epochs=args.epochs,
                                    verbose=1,
                                    validation_data=(x_test, y_test),
                                    callbacks=callbacks)
            else:  # args.input_mode == 'spark':
                # train on data read from a generator which is producing data from a Spark RDD
                tf_feed = TFNode.DataFeed(ctx.mgr)
                history = model.fit_generator(
                    generator=generate_rdd_data(tf_feed, batch_size),
                    steps_per_epoch=args.steps_per_epoch,
                    epochs=args.epochs,
                    verbose=1,
                    callbacks=callbacks)

            if args.export_dir and ctx.job_name == 'worker' and ctx.task_index == 0:
                # save a local Keras model, so we can reload it with an inferencing learning_phase
                save_model(model, "tmp_model")

                # reload the model
                K.set_learning_phase(False)
                new_model = load_model("tmp_model")

                # export a saved_model for inferencing
                builder = saved_model_builder.SavedModelBuilder(
                    args.export_dir)
                signature = predict_signature_def(
                    inputs={'images': new_model.input},
                    outputs={'scores': new_model.output})
                builder.add_meta_graph_and_variables(
                    sess=sess,
                    tags=[tag_constants.SERVING],
                    signature_def_map={'predict': signature},
                    clear_devices=True)
                builder.save()

            if args.input_mode == 'spark':
                tf_feed.terminate()
Example #20
from PIL import Image
from tensorflow.contrib.keras.api.keras.models import load_model
import cv2
import numpy as np
characters="京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新0123456789ABCDEFGHJKLMNPQRSTUVWXYZ"

def decode(y):
    y = np.argmax(np.array(y), axis=2)[:,0]
    return ''.join([characters[x] for x in y])

print("开始导入模型")

model = load_model("./tf12_ResNer34v2_model.h5")
print("导入模型完成")
print("读取图片")
pic = Image.open("./dataSet/15.jpg")
pic.show()
#这里换两种方式是因为两种方式显示的通道顺序不同

img = cv2.imread("./dataSet/15.jpg")
#print(img)
img = cv2.resize(img,(224,224))
img=img[np.newaxis,:,:,:]#图片是三维的但是训练时是转换成4维了所以需要增加一个维度
predict = model.predict(img)

print("车牌号为:",decode(predict))
Example #21
    def __init__(self, file_name, inp_shape=(28, 28, 1)):
        model = load_model(file_name, custom_objects={'fn': loss, 'tf': tf})
        temp_weights = [layer.get_weights() for layer in model.layers]

        self.weights = []
        self.biases = []
        self.model = model

        cur_shape = inp_shape
        i = 0
        while i < len(model.layers):
            layer = model.layers[i]
            i += 1
            weights = layer.get_weights()
            if type(layer) == Conv2D:
                print('conv')
                if len(weights) == 1:
                    W = weights[0].astype(np.float32)
                    b = np.zeros(W.shape[-1], dtype=np.float32)
                else:
                    W, b = weights
                    W = W.astype(np.float32)
                    b = b.astype(np.float32)

                if i < len(model.layers) and type(model.layers[i]) == BatchNormalization:
                    print('batch normalization')
                    gamma, beta, mean, std = model.layers[i].get_weights()
                    std = np.sqrt(std**2 + 0.001)  #Avoids zero division
                    aa = gamma / std
                    bb = gamma * mean / std + beta
                    W = aa * W
                    b = aa * b + bb
                    i += 1

                new_shape = (cur_shape[0] - W.shape[0] + 1,
                             cur_shape[1] - W.shape[1] + 1, W.shape[-1])

                flat_inp = np.prod(cur_shape)
                flat_out = np.prod(new_shape)
                W_flat = np.zeros((flat_inp, flat_out))
                b_flat = np.zeros((flat_out))
                m, n, p = cur_shape
                d, e, f = new_shape
                for x in range(d):
                    for y in range(e):
                        for z in range(f):
                            b_flat[e * f * x + f * y + z] = b[z]
                            for k in range(p):
                                for idx0 in range(W.shape[0]):
                                    for idx1 in range(W.shape[1]):
                                        ii = idx0 + x
                                        jj = idx1 + y
                                        W_flat[n * p * ii + p * jj + k,
                                               e * f * x + f * y + z] = W[idx0,
                                                                          idx1,
                                                                          k, z]

                cur_shape = new_shape
                self.weights.append(W_flat.T)
                self.biases.append(b_flat.T)
            elif type(layer) == Activation:
                print('activation')
            elif type(layer) == Lambda:
                print('lambda')
            elif type(layer) == InputLayer:
                print('input')
            elif type(layer) == Dense:
                print('FC')
                W, b = weights
                self.weights.append(W.T)
                self.biases.append(b.T)
            elif type(layer) == BatchNormalization:
                print('batch normalization')
            elif type(layer) == Dropout:
                print('dropout')
            elif type(layer) == MaxPooling2D:
                print('pool')
                pool_size = layer.get_config()['pool_size']
                stride = layer.get_config()['strides']
                pad = (0, 0, 0, 0)  #p_hl, p_hr, p_wl, p_wr
                cur_shape = (
                    int((cur_shape[0] + pad[0] + pad[1] - pool_size[0]) /
                        stride[0]) + 1,
                    int((cur_shape[1] + pad[2] + pad[3] - pool_size[1]) /
                        stride[1]) + 1, cur_shape[2])
            elif type(layer) == Flatten:
                print('flatten')
            elif type(layer) == Reshape:
                print('reshape')
            else:
                print(str(type(layer)))
                raise ValueError('Invalid Layer Type')
        print(cur_shape)
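To make the Conv2D flattening loop above concrete, here is a small self-contained NumPy check (toy shapes and values, not the class's actual API) showing that a W_flat built with the same indexing reproduces a "valid" convolution as a single matrix multiply on the flattened input.

import numpy as np

inp = np.arange(9, dtype=np.float32).reshape(3, 3, 1)   # 3x3 single-channel input
W = np.ones((2, 2, 1, 1), dtype=np.float32)             # 2x2 kernel, 1 output channel

m, n, p = inp.shape
d, e, f = m - W.shape[0] + 1, n - W.shape[1] + 1, W.shape[-1]
W_flat = np.zeros((m * n * p, d * e * f), dtype=np.float32)
for x in range(d):
    for y in range(e):
        for z in range(f):
            for k in range(p):
                for i0 in range(W.shape[0]):
                    for i1 in range(W.shape[1]):
                        W_flat[n * p * (i0 + x) + p * (i1 + y) + k,
                               e * f * x + f * y + z] = W[i0, i1, k, z]

flat_out = inp.reshape(-1) @ W_flat                     # matrix-multiply form
direct = np.array([[inp[x:x + 2, y:y + 2, 0].sum()      # direct "valid" convolution
                    for y in range(e)] for x in range(d)])
assert np.allclose(flat_out.reshape(d, e), direct)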
Example #22
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 13 09:45:09 2019
@author: One
"""

from PIL import Image
from tensorflow.contrib.keras.api.keras.models import load_model
import cv2
import numpy as np

characters = "京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新0123456789ABCDEFGHJKLMNPQRSTUVWXYZ"


def decode(y):
    y = np.argmax(np.array(y), axis=2)[:, 0]
    return ''.join([characters[x] for x in y])


model = load_model("resnet34_model.h5")
print("导入模型完成")
print("读取图片")
pic = Image.open("C:/Users/One/Desktop/y.jpg")
pic.show()
#这里换两种方式是因为两种方式显示的通道顺序不同

img = cv2.imread("C:/Users/One/Desktop/y.jpg")
img = img[np.newaxis, :, :, :]  #图片是三维的但是训练时是转换成4维了所以需要增加一个维度
predict = model.predict(img)

print("车牌号为:", decode(predict))
Example #23
def class_raster(in_list, model_file, size_wind, class_block_dir):

    out_driver = gdal.GetDriverByName('GTiff')

    for isub_tif in in_list:

        isub_filename = os.path.splitext(os.path.basename(
            list(isub_tif)[1]))[0]
        iclass_out_file = os.path.join(class_block_dir,
                                       '%s_class.tif' % isub_filename)

        if os.path.exists(iclass_out_file):
            out_driver.Delete(iclass_out_file)

        file_name = os.path.splitext(os.path.basename(
            list(isub_tif)[1]))[0].split('_')[-4:]
        ysize = int(file_name[3])
        num_xsize = int(file_name[2])
        num_sub_size = int(size_wind / 2)

        sds = gdal.Open(list(isub_tif)[1])
        if sds is None:
            sys.exit('Problem opening file %s!' % list(isub_tif)[1])

        sds_xsize = sds.RasterXSize
        sds_ysize = sds.RasterYSize
        num_band = sds.RasterCount

        in_data = sds.ReadAsArray(0, 0, sds_xsize, sds_ysize)
        temp_data = np.zeros((1, size_wind, size_wind, num_band),
                             dtype=float)
        xind_list = []
        yind_list = []

        out_dataset = out_driver.Create(iclass_out_file, num_xsize, ysize, 1,
                                        gdal.GDT_Byte)
        out_band = out_dataset.GetRasterBand(1)

        out_data = np.zeros((ysize, num_xsize), dtype=np.uint8)
        out_data[:, :] = 200

        for iyoffset in range(ysize):
            for ixoffset in range(num_xsize):

                if in_data[0, (iyoffset + num_sub_size):(iyoffset +
                                                         num_sub_size + 1),
                           (ixoffset + num_sub_size):(ixoffset + num_sub_size +
                                                      1)][0][0] <= 0:
                    continue

                isample_data = in_data[:, iyoffset:(iyoffset + size_wind),
                                       ixoffset:(ixoffset + size_wind)]
                # print(isample_data.shape)

                # is_t = isample_data.T
                # is_t_r = is_t.reshape(1, size_wind, size_wind, num_band).reshape(size_wind, size_wind, num_band)
                temp_data = np.vstack(
                    (temp_data,
                     isample_data.T.reshape(1, size_wind, size_wind,
                                            num_band)))
                xind_list.append(iyoffset)
                yind_list.append(ixoffset)

                isample_data = None

        model = load_model(model_file)
        y_pred = model.predict_classes(temp_data[1:, :, :, :] / 10000)

        for iout in range(len(xind_list)):
            out_data[xind_list[iout], yind_list[iout]] = y_pred[iout]

        out_band.WriteArray(out_data, 0, 0)
        temp_data = None
        out_data = None
        out_band = None
        out_dataset = None
        sds = None
Example #24
def main(model_name, data, epsilon=0.111):

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    config.log_device_placement = False
    sess = tf.Session(config=config)
    with sess.as_default():

        def loss(correct, predicted):
            return tf.nn.softmax_cross_entropy_with_logits(labels=correct, logits=predicted)

        images = []
        true_labels = []
        true_labels_one_hot = []
        targets = []
        i = 0
        while len(images) < 5:
            image = data.test_data[i]
            label = np.argmax(data.test_labels[i])
            num_classes = len(data.test_labels[0])
            if label not in true_labels:
                images.append(image)
                true_labels.append(label)
                true_labels_one_hot.append(data.test_labels[i])
                targets.append((label + 1) % num_classes)
            i += 1

        model = load_model(model_name, custom_objects={'fn': loss, 'tf': tf})

        image_size = data.test_data.shape[1]
        num_channels = data.test_data.shape[3]
        num_labels = data.test_labels.shape[1]

        shape = (None, image_size, image_size, num_channels)
        model.x_input = tf.placeholder(tf.float32, shape)
        model.y_input = tf.placeholder(tf.float32, [None, num_labels])

        pre_softmax = model(model.x_input)
        y_loss = tf.nn.softmax_cross_entropy_with_logits(labels=model.y_input, logits=pre_softmax)
        model.xent = tf.reduce_sum(y_loss)

        adv_steps = 40
        attack = LinfPGDAttack(model, epsilon, adv_steps, epsilon * 1.33 / adv_steps, random_start=True)
        advs = attack.perturb(np.array(images), true_labels_one_hot, sess)
        advs2 = LinfPGDAttack(model, 0.01, adv_steps, 0.01 * 1.33 / adv_steps, random_start=True).perturb(np.array(images), true_labels_one_hot, sess)

    fig = plt.figure(figsize=(10, 4))

    def plot_images(x_position, images, sets, gray_scale=False):
        images = images + 0.5
        images = images / np.max(images)
        for i in range(len(images)):
            ax = fig.add_subplot(sets, 5, i + 5*x_position+1)
            if gray_scale:
                plt.imshow(images[i], cmap='gray')
            else:
                plt.imshow(images[i])
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
    if num_channels == 1:
        plot_images(0, np.reshape(images, [-1, 28, 28]), 3, gray_scale=True)
        plot_images(1, np.reshape(advs, [-1, 28, 28]), 3, gray_scale=True)
    else:
        plot_images(0, np.reshape(images, [-1, 32, 32, 3]), 3)
        plot_images(1, np.reshape(advs, [-1, 32, 32, 3]), 3)
    plt.show()
Example #25
    heatmap = cam / np.max(cam)

    # Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image -= np.min(image)
    image = np.minimum(image, 255)

    cam = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    cam = np.float32(cam) + np.float32(image)
    cam = 255 * cam / np.max(cam)
    return np.uint8(cam), heatmap


preprocessed_input = load_image(find(INPUT, INPUT_FOLDER))
k.set_learning_phase(0)
model = load_model('cifarClassification.h5')
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
predictions = model.predict(preprocessed_input)

top_3 = decode_predictions(predictions)[0][0:3]
print('Predicted class:')
for x in range(0, len(top_3)):
    print('%s with probability %.2f' % (top_3[x][0], top_3[x][1]))

predicted_class = np.argmax(predictions)
cam, heatmap = grad_cam(model, preprocessed_input, predicted_class, layer_name)
cv2.imwrite(
    OUTPUT_FOLDER + "gradcam_" + INPUT[:-5] + "_" + layer_name + ".jpg", cam)
print('Gradient class activation image saved in the current directory!')

register_gradient()