Example #1
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# The `ig` integrated-gradients helpers and the layer helpers
# (weight_variable, bias_variable, conv2d, max_pool_2x2) are assumed to be
# defined elsewhere in the project; see the sketches after this example.

def tf_testing_2():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    x = tf.placeholder(tf.float32, shape=[None, 784])
    # Tile each input into 50 points on the straight line from the baseline.
    inter, stepsize, ref = ig.linear_inpterpolation(x, num_steps=50)

    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    x_image_inter = tf.reshape(inter, [-1, 28, 28, 1])

    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_conv1_inter = tf.nn.relu(conv2d(x_image_inter, W_conv1) + b_conv1)

    h_pool1 = max_pool_2x2(h_conv1)
    h_pool1_inter = max_pool_2x2(h_conv1_inter)

    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_conv2_inter = tf.nn.relu(conv2d(h_pool1_inter, W_conv2) + b_conv2)

    h_pool2 = max_pool_2x2(h_conv2)
    h_pool2_inter = max_pool_2x2(h_conv2_inter)

    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_pool2_flat_inter = tf.reshape(h_pool2_inter, [-1, 7 * 7 * 64])

    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    h_fc1_inter = tf.nn.relu(tf.matmul(h_pool2_flat_inter, W_fc1) + b_fc1)

    keep_prob = tf.placeholder(tf.float32)  # fed by the feed_dicts below, but dropout is never applied in this version

    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2  # no-dropout version
    y_conv_inter = tf.matmul(h_fc1_inter, W_fc2) + b_fc2  # no-dropout version
    prediction = tf.nn.softmax(y_conv)
    prediction2 = tf.nn.softmax(y_conv_inter)

    # One IG attribution op per output class.
    explanations = []
    for i in range(10):
        explanations.append(
            ig.build_ig(inter, stepsize, prediction2[:, i], num_steps=50))

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver(
        {
            "W_conv1": W_conv1,
            "b_conv1": b_conv1,
            "W_conv2": W_conv2,
            "b_conv2": b_conv2,
            "W_fc1": W_fc1,
            "b_fc1": b_fc1,
            "W_fc2": W_fc2,
            "b_fc2": b_fc2
        }
    )  #maybe try [W_conv1, b_conv1, h_pool1, W_conv2, b_conv2, h_pool2, W_fc1, b_fc1, h_fc1, W_fc2, b_fc2] as an argument?
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(20000):
            batch = mnist.train.next_batch(50)
            if i % 100 == 0:
                train_accuracy = accuracy.eval(feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    keep_prob: 1.0
                },
                                               session=sess)
                print('step %d, training accuracy %g' % (i, train_accuracy))
                #tf.train.Saver().save(sess, 'tf_models/mnist_iter', global_step=i)
            train_step.run(feed_dict={
                x: batch[0],
                y_: batch[1],
                keep_prob: 0.5
            },
                           session=sess)

        print('test accuracy %g' % accuracy.eval(feed_dict={
            x: mnist.test.images,
            y_: mnist.test.labels,
            keep_prob: 1.0
        },
                                                 session=sess))

        saver.save(sess, 'tf_models/mnist_no_dropout')
        #saver.save(sess, 'tf_models/mnist')
        np.set_printoptions(threshold=np.inf)  # print full arrays
        f = open("./example_10.txt", 'r')
        lines = f.readlines()
        thing = str.split(lines[0], ',')
        thing = [float(a) + 0.5 for a in thing]
        #print(str(len(thing)))
        im_data = np.array(thing[1:], dtype=np.float32)
        data = np.ndarray.flatten(im_data)
        feed_dict = {x: [data], keep_prob: 1.0}

        result = y_conv.eval(feed_dict)
        print('Original output (logits):')
        print(result)

        result = prediction.eval(feed_dict)[0]
        print('Prediction:')
        print(result)

        predicted_label = int(np.argmax(prediction.eval(feed_dict)[0]))
        print('Predicted Label:')
        print(predicted_label)

        result = prediction2.eval(feed_dict)
        print('IG Prediction:')
        print(result)

        result = prediction2.eval(feed_dict)[:, predicted_label]
        print('IG Prediction Label:')
        print(result)

        result = (explanations[predicted_label]).eval(feed_dict)
        print('IG Attribution:')
        print(result)
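
The example assumes four layer helpers that are not shown. A minimal sketch of the conventional definitions the network's shapes imply (stride-1 'SAME' convolutions and 2x2 max pooling, so two pooling stages take 28x28 down to the 7x7 the first fully connected layer expects):

def weight_variable(shape):
    # Small positive noise breaks symmetry between units.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    # A slightly positive bias helps avoid dead ReLUs at initialization.
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, W):
    # Stride-1, zero-padded convolution: preserves spatial size.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max pooling with stride 2: halves each spatial dimension.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')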
Example #2
    def build(self):
        # Build the graph only once.
        if self.built:
            return -1
        else:
            self.built = True

        x = tf.placeholder(tf.float32, shape=[None, 28, 28], name="x")
        _x = tf.contrib.slim.flatten(x)
        y = tf.placeholder(tf.float32, shape=[None, 10], name="y")  # float one-hot labels, as softmax_cross_entropy_with_logits requires

        # Build the IG interpolation of the flattened input.
        inter, stepsize, ref = ig.linear_inpterpolation(_x, num_steps=50)

        # Fully connected encoder.
        with tf.variable_scope("predictor"):
            dense = _x
            for dim in self.dimensions:
                dense = tf.contrib.slim.fully_connected(
                    dense, dim, activation_fn=tf.nn.relu)
            dense = tf.contrib.slim.fully_connected(dense,
                                                    10,
                                                    activation_fn=tf.identity)
            prediction = tf.nn.softmax(dense)

        with tf.variable_scope("predictor", reuse=True):
            dense2 = inter
            for dim in self.dimensions:
                dense2 = tf.contrib.slim.fully_connected(
                    dense2, dim, activation_fn=tf.nn.relu)
            dense2 = tf.contrib.slim.fully_connected(dense2,
                                                     10,
                                                     activation_fn=tf.identity)
            prediction2 = tf.nn.softmax(dense2)

        explanations = []
        for i in range(10):
            explanations.append(
                ig.build_ig(inter, stepsize, prediction2[:, i], num_steps=50))

        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=dense, labels=y))

        # Cost is just the classification cross-entropy, averaged over the
        # minibatch.
        with tf.name_scope("cost"):
            cost = loss

        # Defining optimization procedure.
        with tf.name_scope("Adam_optimizer"):
            optimizer = tf.train.AdamOptimizer()
            tvars = tf.trainable_variables()
            grads_and_vars = optimizer.compute_gradients(cost, tvars)
            clipped = [(tf.clip_by_value(grad, -5, 5), tvar)
                       for grad, tvar in grads_and_vars]
            train = optimizer.apply_gradients(clipped, name="minimize_cost")

        # Export the operations as a dictionary.
        return dict(x=x,
                    y=y,
                    prediction=prediction,
                    cost=cost,
                    train=train,
                    explanations=explanations)
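
Since build() hands back its ops as a dictionary, driving the model only requires feeding the x and y placeholders and running the returned handles. A minimal usage sketch, assuming an already-constructed instance model of this class and a hypothetical next_training_batch() yielding images shaped [batch, 28, 28] with one-hot labels shaped [batch, 10]:

ops = model.build()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        batch_x, batch_y = next_training_batch()  # hypothetical data source
        _, cost = sess.run([ops["train"], ops["cost"]],
                           feed_dict={ops["x"]: batch_x, ops["y"]: batch_y})
    # Integrated-gradients attribution for class 3 of the first image.
    attribution = sess.run(ops["explanations"][3],
                           feed_dict={ops["x"]: batch_x[:1]})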
Example #3

    # (The opening of this snippet is truncated in the source; these are the
    # trailing keyword arguments of a call whose head is not shown.)
        num_classes=num_classes,
        iterator_size=FLAGS.pre_size,
        kth_init_op=FLAGS.iter_epoch,
        classifier_version=4,
    )

# Create a reinitializable iterator given the dataset structure.
iterator = Iterator.from_structure(pre_data.data.output_types,
                                   pre_data.data.output_shapes)
next_batch = iterator.get_next()

# Op for initializing the prediction iterator
predicting_init_op = iterator.make_initializer(pre_data.data)

x = tf.placeholder(tf.float32, [None, 227, 227, num_channels])
inter, stepsize, ref = ig.linear_inpterpolation(x, num_steps=50)
keep_prob = tf.constant(1., dtype=tf.float32)

# Initialize model
# model = AlexNet(x, keep_prob, 2, [])
model_ig = AlexNet(inter, keep_prob, 2, [])

# Link variable to model output
# score = model.fc8
score_ig = model_ig.fc8

# softmax = tf.nn.softmax(score)
softmax_ig = tf.nn.softmax(score_ig)

# Calculate integrated gradients: one attribution op per class, following the
# same pattern as the examples above (the model is built with 2 classes).
explanations = []
for i in range(2):
    explanations.append(
        ig.build_ig(inter, stepsize, softmax_ig[:, i], num_steps=50))
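
All three examples lean on the same ig module, whose source is not shown. The following is a sketch of what its two entry points plausibly compute, inferred only from how they are called here; the baseline choice, tensor layout, and return values are assumptions, and the sketch is written for flat [batch, d] inputs. linear_inpterpolation tiles each input into num_steps points along the straight line from a baseline ref to the input, and build_ig approximates the integrated-gradients integral with a Riemann sum of gradients along that path:

def linear_inpterpolation(x, num_steps=50):
    # x: [batch, d]. Assumed baseline: all zeros.
    ref = tf.zeros_like(x)
    stepsize = (x - ref) / num_steps          # per-step increment, [batch, d]
    alphas = tf.reshape(
        tf.range(1, num_steps + 1, dtype=tf.float32), [num_steps, 1, 1])
    # Stack all interpolation points into one batch, step-major:
    # shape [num_steps * batch, d].
    inter = tf.reshape(ref[None] + alphas * stepsize[None],
                       [-1, tf.shape(x)[1]])
    return inter, stepsize, ref

def build_ig(inter, stepsize, output, num_steps=50):
    # Riemann-sum approximation of integrated gradients:
    # sum_t dF/dx(x_t) * stepsize, one attribution per original input.
    grads = tf.gradients(output, inter)[0]     # [num_steps * batch, d]
    grads = tf.reshape(grads,
                       [num_steps, -1, tf.shape(stepsize)[1]])
    return tf.reduce_sum(grads, axis=0) * stepsize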