Example no. 1
    def loss(timg, tlab, const, modifier):
      newimg = compute_newimage(timg, modifier)
      # pre-softmax prediction (logits) of the model
      if self.sample <= 1:
        output = self.fn_logits(newimg)
      else:
        logging.info(
          "Monte Carlo (MC) on attacks, sample: {}".format(self.sample))
        output = []  # collect the logits from each Monte Carlo sample
        for i in range(self.sample):
          logits = self.fn_logits(newimg)
          if i == 0:
            # make sure the model returns pre-softmax logits
            assert logits.op.type != 'Softmax'
          output.append(logits)
        output = tf.reduce_mean(output, 0)

      # distance to the input data
      l2dist = get_l2dist(timg, newimg)

      # compute the probability of the label class versus the maximum other
      real_target = tf.reduce_sum((tlab) * output, 1)
      other_target = tf.reduce_max((1 - tlab) * output - tlab * 10000, 1)
      zero = tf.constant(0., dtype=tf_dtype)
      if self.y_target:
        # if targeted, optimize for making the other class most likely
        loss1 = tf.maximum(zero, other_target - real_target + self.confidence)
      else:
        # if untargeted, optimize for making this class least likely.
        loss1 = tf.maximum(zero, real_target - other_target + self.confidence)

      # sum up the losses
      loss2 = tf.reduce_sum(l2dist)
      loss1 = tf.reduce_sum(const * loss1)
      loss = loss1 + loss2
      return loss, output
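The objective above combines an L2 distance term (loss2) with a hinge on the logit margin (loss1): the target logit must beat the largest competing logit by at least self.confidence. A minimal NumPy sketch of that hinge term for a single example (the names logits, tlab_onehot, confidence are illustrative, not from the snippet):

import numpy as np

def cw_hinge(logits, tlab_onehot, confidence, targeted=True):
    # logit of the labeled/target class
    real = np.sum(tlab_onehot * logits)
    # largest logit among all remaining classes
    other = np.max((1 - tlab_onehot) * logits - tlab_onehot * 1e4)
    if targeted:
        # targeted: push the target class above every other class by `confidence`
        return max(0.0, other - real + confidence)
    # untargeted: push the original class below some other class by `confidence`
    return max(0.0, real - other + confidence)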
Example no. 2
def train_painting(X, Y, Y_pred, img, n_iterations=500, batch_size=50, learning_rate=0.001):
    cost = tf.reduce_mean(tf.reduce_sum(distance(Y_pred, Y), 1))
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for it_i in range(n_iterations):
            idxs = np.random.permutation(len(xs))
            n_batches = len(idxs) // batch_size
            for batch_i in range(n_batches):
                idxs_i = idxs[batch_i * batch_size : (batch_i + 1)* batch_size]
                sess.run(optimizer, feed_dict={X:xs[idxs_i], Y:ys[idxs_i]})

            training_cost = sess.run(cost, feed_dict={X:xs, Y:ys})

            if it_i % 20 == 0:
                ys_pred = Y_pred.eval(feed_dict={X: xs}, session=sess)
                fig, ax = plt.subplots(1, 1)
                img_out = np.clip(ys_pred.reshape(img.shape), 0, 255).astype(np.uint8)
                ax.imshow(img_out)
                plt.show()
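train_painting assumes that xs (pixel coordinates) and ys (pixel colors) already exist in the enclosing scope, along with the X/Y placeholders, the network output Y_pred, and an elementwise error helper named distance. A minimal sketch of how those inputs are typically built from an RGB image img (shapes and names here are assumptions, not taken from the snippet):

import numpy as np
import tensorflow as tf

# img: an (H, W, 3) uint8 RGB image loaded beforehand
# one training pair per pixel: (row, col) -> (r, g, b)
xs = np.array([[r, c] for r in range(img.shape[0])
               for c in range(img.shape[1])], dtype=np.float32)
ys = img.reshape(-1, 3).astype(np.float32)

X = tf.placeholder(tf.float32, shape=[None, 2], name='X')
Y = tf.placeholder(tf.float32, shape=[None, 3], name='Y')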
Example no. 3
    outputs,state = tf.nn.dynamic_rnn(mlstm_cell, inputs=X, initial_state=init_state, time_major=False)
    h_state = outputs[:, -1, :]  # or equivalently: h_state = state[-1][1]
    # print(h_state.shape)
    # set up the loss function and the optimizer, then run training and testing
    # for classification we still need a softmax layer on top
    # first define the softmax layer's weight matrix and bias
    W = tf.Variable(tf.truncated_normal([hidden_size,class_num],stddev=0.1),dtype=tf.float32)
    bias = tf.Variable(tf.constant(0.1,shape=[class_num]),dtype=tf.float32)
    y_pre = tf.nn.softmax(tf.matmul(h_state,W)+bias)

    # loss and evaluation
    cross_entropy = -tf.reduce_mean(y*tf.log(y_pre))
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_pre,1),tf.argmax(y,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float32"))

    sess.run(tf.global_variables_initializer())
    for i in range(2000):
        _batch_size = 128
        batch = mnist.train.next_batch(_batch_size)
        # print(batch[0])  # debug: dump the raw batch
        if (i+1) % 200 == 0:
            train_accuracy = sess.run(accuracy, feed_dict={
                _X: batch[0], y: batch[1], keep_prob: 1.0, batch_size: _batch_size})
            print("Iter%d, step %d, training accuracy %g" % (mnist.train.epochs_completed, (i+1), train_accuracy))
        # run one optimization step on this batch (the training keep_prob value is an assumption)
        sess.run(train_step, feed_dict={
            _X: batch[0], y: batch[1], keep_prob: 0.5, batch_size: _batch_size})

    print(mnist.test.images.shape[0])
    # test
    print(" test accuracy %g" % (  sess.run(accuracy,feed_dict={
        _X:mnist.test.image,y:mnist.test.labels,keep_prob:1.0,batch_size:mnist.test.images.shape[0]})))
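The loop above feeds placeholders _X, y, keep_prob, and batch_size that are defined earlier in the original script. A plausible setup, consistent with the shapes used above (the exact hyperparameter values are assumptions), would be:

lr = 1e-3
input_size = 28      # each MNIST row is one timestep of 28 pixels
timestep_size = 28
hidden_size = 256
class_num = 10

_X = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, class_num])
keep_prob = tf.placeholder(tf.float32)
batch_size = tf.placeholder(tf.int32, [])

# reshape the flat 784-pixel vector into a (batch, time, feature) sequence for the LSTM
X = tf.reshape(_X, [-1, timestep_size, input_size])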
Example no. 4
            b2 = tf.Variable(tf.random_normal([256]), name='b2')
            x = tf.nn.tanh(tf.add(tf.matmul(x, W2), b2))
            # Add this `tanh` op to the activations collection for monitoring
            tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, x)
            # Add weights regularizer (Regul. summary automatically added)
            tflearn.add_weights_regularizer(W2, 'L2', weight_decay=0.001)

        with tf.name_scope('Layer3'):
            W3 = tf.Variable(tf.random_normal([256, 10]), name='W3')
            b3 = tf.Variable(tf.random_normal([10]), name='b3')
            x = tf.add(tf.matmul(x, W3), b3)

        return x

    net = dnn(X)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1)), tf.float32),
        name="acc")

    with tf.name_scope('CustomMonitor'):
        test_var = tf.reduce_mean(tf.cast(net, tf.float32), name='test_var')
        test_const = tf.constant(32.0, name="custom_constant")

    # Define a train op
    trainop = tflearn.TrainOp(loss=loss, optimizer=optimizer,
                              validation_monitors=[test_var, test_const],
                              metric=accuracy, batch_size=128)

    # Tensorboard logs stored in /tmp/tflearn_logs/. Using verbose level 2.
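The snippet is cut off before the trainer itself is created; following the same pattern as Example no. 6 below, the missing lines would look roughly like this (trainX/trainY/testX/testY are illustrative names, not from the original):

    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=2)
    trainer.fit({X: trainX, Y: trainY},
                val_feed_dicts={X: testX, Y: testY},
                n_epoch=10, show_metric=True)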
Example no. 5
#encoding:utf8

from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import os
import sys

if __name__ == "__main__":
    mnist = input_data.read_data_sets("../MNIST_data", one_hot = True)
    sess = tf.InteractiveSession()     
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))

    sess.run(tf.global_variables_initializer())
    y = tf.matmul(x, W) + b
    
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    for _ in range(1000):
        batch = mnist.train.next_batch(100)
        train_step.run(feed_dict={x : batch[0], y_ : batch[1]})
    tf. 
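The listing breaks off mid-statement after the training loop; in the standard MNIST beginners tutorial, the evaluation step that usually follows looks like this (a sketch, not part of the original snippet):

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))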
    
Example no. 6
    b2 = tf.Variable(tf.random_normal([256]))
    b3 = tf.Variable(tf.random_normal([10]))

    # Multilayer perceptron
    def dnn(x):
        x = tf.nn.tanh(tf.add(tf.matmul(x, W1), b1))
        x = tf.nn.tanh(tf.add(tf.matmul(x, W2), b2))
        x = tf.add(tf.matmul(x, W3), b3)
        return x

    net = dnn(X)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    accuracy = tf.reduce_mean(tf.cast(
        tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1)), tf.float32),
                              name='acc')

    # Using TFLearn Trainer
    # Define a training op (op for backprop, only need 1 in this model)
    trainop = tflearn.TrainOp(loss=loss,
                              optimizer=optimizer,
                              metric=accuracy,
                              batch_size=128)

    # Create trainer, providing all training ops. Tensorboard logs stored
    # in /tmp/tflearn_logs/. It is possible to change verbose level for more
    # details logs about gradients, variables etc...
    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=0)
    # Training for 10 epochs.
    trainer.fit({