Example #1
import random

import tensorflow as tf

# `mnist` is the project's MNIST input helper (its import is not shown in
# these snippets); mnist.train() yields a dataset of (image, label) pairs.
def basic_mnist_input_corpus(choose_randomly=False, data_dir="/tmp/mnist"):
    """Returns the first image and label from MNIST.

    Args:
      choose_randomly: a boolean indicating whether to choose randomly.
      data_dir: a string giving the location of the original MNIST data.
    Returns:
      A single image and a single label.
    """

    dataset = mnist.train(data_dir)
    dataset = dataset.cache().shuffle(buffer_size=50000).batch(100).repeat()
    iterator = dataset.make_one_shot_iterator()
    images, integer_labels = iterator.get_next()
    images = tf.reshape(images, [-1, 28, 28, 1])
    labels = integer_labels  # Keep integer labels; no one-hot encoding here.

    with tf.train.MonitoredTrainingSession() as sess:
        image_batch, label_batch = sess.run([images, labels])

    if choose_randomly:
        idx = random.choice(range(image_batch.shape[0]))
    else:
        idx = 0
    tf.logging.info("Seeding corpus with element at idx: %s", idx)
    return image_batch[idx], label_batch[idx]
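For context, a minimal driver sketch (hypothetical; the surrounding fuzzing harness is not shown in these snippets) of how the seed element might be used:

# Hypothetical usage: basic_mnist_input_corpus returns NumPy arrays, so the
# seed element can be inspected directly before it is handed to a fuzzer.
seed_image, seed_label = basic_mnist_input_corpus(choose_randomly=True)
print(seed_image.shape)  # (28, 28, 1)
print(seed_label)        # an integer class id in [0, 9]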
Example #2
import os

import tensorflow as tf

# FLAGS, mnist, classifier, unsafe_softmax, and unsafe_cross_entropy come
# from elsewhere in the project; plausible sketches of the undefined helpers
# follow the examples below.
def main(_):
    """Trains the unstable model."""

    dataset = mnist.train(FLAGS.data_dir)
    # Keep a 50000-element shuffle buffer and draw batches of 100, repeating forever.
    dataset = dataset.cache().shuffle(buffer_size=50000).batch(100).repeat()
    iterator = dataset.make_one_shot_iterator()  # One-shot: each element is produced once, then discarded.
    images, integer_labels = iterator.get_next()  # A batch of images and integer labels.
    images = tf.reshape(images, [-1, 28, 28, 1])  # -1 lets TF infer the batch dimension.

    label_input_tensor = tf.identity(integer_labels)  # Label input tensor.
    labels = tf.one_hot(label_input_tensor, 10)  # One-hot encode the labels.

    init_func = tf.random_uniform_initializer(-FLAGS.init_scale,
                                              FLAGS.init_scale)
    # Build the classifier; it returns the logits and the image input tensor.
    logits, image_input_tensor = classifier(images, init_func)
    equality = tf.equal(tf.argmax(logits, 1),
                        tf.argmax(labels, 1))  # argmax gives the index of the largest entry.
    accuracy = tf.reduce_mean(tf.to_float(equality))  # Accuracy: cast the booleans to float, then average.

    # This will NaN if abs of any logit >= 88.
    bad_softmax = unsafe_softmax(logits)
    # This will NaN if max_logit - min_logit >= 88.
    bad_cross_entropies = unsafe_cross_entropy(bad_softmax, labels)
    loss = tf.reduce_mean(bad_cross_entropies)  # Mean loss over the unstable cross entropies.
    # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    optimizer = tf.train.GradientDescentOptimizer(0.01)  # Plain SGD.

    tf.add_to_collection("input_tensors", image_input_tensor)  # 添加输入图片张量
    tf.add_to_collection("input_tensors", label_input_tensor)  # 添加标签张量
    tf.add_to_collection("coverage_tensors", logits)  # 输出层的输出
    tf.add_to_collection("metadata_tensors",
                         bad_softmax)  # 输出层经过unsafe_softmax后的输出
    tf.add_to_collection("metadata_tensors",
                         bad_cross_entropies)  # bad_softmax和labels的交叉熵
    tf.add_to_collection("metadata_tensors", logits)  # 输出层的输出

    train_op = optimizer.minimize(loss)  # Training op.

    saver = tf.train.Saver(keep_checkpoint_every_n_hours=1)
    sess = tf.Session()
    sess.run(tf.tables_initializer())
    sess.run(tf.global_variables_initializer())

    # train classifier on these images and labels
    for idx in range(FLAGS.training_steps):
        sess.run(train_op)  # The iterator pipeline feeds the graph; no feed_dict is needed.
        if idx % 1000 == 0:
            loss_val, accuracy_val = sess.run([loss, accuracy])
            print(idx,
                  ":loss: {}, accuracy: {}".format(loss_val, accuracy_val))
            saver.save(
                sess,
                os.path.join(FLAGS.checkpoint_dir, "fuzz_checkpoint"),
                global_step=idx,
            )  # Save a checkpoint.
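Examples #2 and #3 call unsafe_softmax and unsafe_cross_entropy without defining them. A plausible sketch (an assumption, consistent with the NaN comments above): tf.exp overflows float32 once its argument reaches about 88, since exp(88) ≈ 1.7e38 is close to the float32 maximum of about 3.4e38, so a naive softmax over raw logits produces inf and then NaN.

# Assumed definitions of the numerically unstable helpers; they are not part
# of these snippets.
def unsafe_softmax(logits):
    """Softmax without the usual subtract-the-max trick: overflows easily."""
    exps = tf.exp(logits)
    return exps / tf.reduce_sum(exps, axis=1, keepdims=True)

def unsafe_cross_entropy(probabilities, labels):
    """Cross entropy that takes the log of possibly-zero probabilities."""
    return -tf.reduce_sum(labels * tf.log(probabilities), axis=1)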
Example #3
def main(_):
    """Trains the unstable model."""
    dataset = mnist.train(FLAGS.data_dir)
    dataset = dataset.cache().shuffle(buffer_size=50000).batch(100).repeat()
    iterator = tf.data.make_one_shot_iterator(dataset)
    images, integer_labels = iterator.get_next()
    images = tf.reshape(images, [-1, 28, 28, 1])
    label_input_tensor = tf.identity(integer_labels)
    labels = tf.one_hot(label_input_tensor, 10)
    init_func = tf.random_uniform_initializer(-FLAGS.init_scale,
                                              FLAGS.init_scale)
    logits, image_input_tensor = classifier(images, init_func)
    equality = tf.math.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.math.reduce_mean(tf.cast(equality, tf.float32))

    # This will NaN if abs of any logit >= 88.
    bad_softmax = unsafe_softmax(logits)
    # This will NaN if max_logit - min_logit >= 88.
    bad_cross_entropies = unsafe_cross_entropy(bad_softmax, labels)
    loss = tf.compat.v1.reduce_mean(bad_cross_entropies)
    optimizer = tf.train.GradientDescentOptimizer(0.01)

    tf.compat.v1.add_to_collection("input_tensors", image_input_tensor)
    tf.compat.v1.add_to_collection("input_tensors", label_input_tensor)
    tf.compat.v1.add_to_collection("coverage_tensors", logits)
    tf.compat.v1.add_to_collection("metadata_tensors", bad_softmax)
    tf.compat.v1.add_to_collection("metadata_tensors", bad_cross_entropies)
    tf.compat.v1.add_to_collection("metadata_tensors", logits)

    train_op = optimizer.minimize(loss)

    saver = tf.train.Saver(keep_checkpoint_every_n_hours=1)
    sess = tf.compat.v1.Session()
    sess.run(tf.compat.v1.tables_initializer())
    sess.run(tf.compat.v1.global_variables_initializer())

    # train classifier on these images and labels
    for idx in range(FLAGS.training_steps):
        sess.run(train_op)
        if idx % 1000 == 0:
            loss_val, accuracy_val = sess.run([loss, accuracy])
            print("loss: {}, accuracy: {}".format(loss_val, accuracy_val))
            saver.save(
                sess,
                os.path.join(FLAGS.checkpoint_dir, "fuzz_checkpoint"),
                global_step=idx,
            )
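Examples #2 and #3 call classifier(images, init_func), which is also not shown. A hypothetical sketch, assuming a small fully connected network that exposes its input via tf.identity so the fuzzer has a stable tensor to feed:

# Hypothetical classifier(); the project's real definition is not included
# in these snippets. Layer sizes here are illustrative.
def classifier(images, init_func):
    image_input_tensor = tf.identity(images)
    net = tf.reshape(image_input_tensor, [-1, 784])
    net = tf.layers.dense(net, 200, activation=tf.nn.relu,
                          kernel_initializer=init_func)
    net = tf.layers.dense(net, 100, activation=tf.nn.relu,
                          kernel_initializer=init_func)
    logits = tf.layers.dense(net, 10, kernel_initializer=init_func)
    return logits, image_input_tensor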
Example #4
def main(_):
    """Train a model and a sort-of-quantized version."""

    dataset = mnist.train(FLAGS.data_dir)
    dataset = dataset.cache().shuffle(buffer_size=50000).batch(100).repeat()
    iterator = dataset.make_one_shot_iterator()
    images, integer_labels = iterator.get_next()
    images = tf.reshape(images, [-1, 28, 28, 1])
    images = tf.identity(
        images)  # tf.identity gives a distinct tensor handle to expose as the model input.

    # Now we construct the model in kind of a goofy way, because this makes
    # quantization easier?

    # Sizes of hidden layers
    h_0 = 784
    h_1 = 200
    h_2 = 100
    h_3 = 10

    # Declaring the weight variables
    images_flattened = tf.reshape(images, [-1, h_0])  # Flatten each image to a 784-vector.

    w_fc1 = weight_variable([h_0, h_1])
    b_fc1 = bias_variable([h_1])

    w_fc2 = weight_variable([h_1, h_2])
    b_fc2 = bias_variable([h_2])

    w_fc3 = weight_variable([h_2, h_3])
    b_fc3 = bias_variable([h_3])

    # Constructing the classifier from the weight variables
    h_fc1 = tf.nn.relu(tf.matmul(images_flattened, w_fc1) + b_fc1)
    h_fc2 = tf.nn.relu(tf.matmul(h_fc1, w_fc2) + b_fc2)
    h_fc3 = tf.matmul(h_fc2, w_fc3) + b_fc3

    logits = h_fc3

    # Now I want to construct another classifier w/ quantized weights
    images_quantized = tf.cast(images_flattened, tf.float16)  # Cast to float16 ("sort-of" quantization).

    w_fc1_quantized = tf.cast(w_fc1, tf.float16)
    b_fc1_quantized = tf.cast(b_fc1, tf.float16)

    w_fc2_quantized = tf.cast(w_fc2, tf.float16)
    b_fc2_quantized = tf.cast(b_fc2, tf.float16)

    w_fc3_quantized = tf.cast(w_fc3, tf.float16)
    b_fc3_quantized = tf.cast(b_fc3, tf.float16)

    # Constructing the classifier from the weight variables
    h_fc1_quantized = tf.nn.relu(
        tf.matmul(images_quantized, w_fc1_quantized) + b_fc1_quantized)
    h_fc2_quantized = tf.nn.relu(
        tf.matmul(h_fc1_quantized, w_fc2_quantized) + b_fc2_quantized)
    h_fc3_quantized = (tf.matmul(h_fc2_quantized, w_fc3_quantized) +
                       b_fc3_quantized)

    logits_quantized = h_fc3_quantized

    labels = tf.one_hot(integer_labels, 10)
    equality = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.to_float(equality))

    cross_entropies = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                              labels=labels)
    loss = tf.reduce_mean(cross_entropies)
    optimizer = tf.train.GradientDescentOptimizer(0.01)

    tf.add_to_collection("input_tensors", images)
    tf.add_to_collection("coverage_tensors", logits)
    tf.add_to_collection("coverage_tensors", logits_quantized)
    tf.add_to_collection("metadata_tensors", logits)
    tf.add_to_collection("metadata_tensors", logits_quantized)

    train_op = optimizer.minimize(loss)

    saver = tf.train.Saver(keep_checkpoint_every_n_hours=1)
    sess = tf.Session()
    sess.run(tf.tables_initializer())
    sess.run(tf.global_variables_initializer())

    # train classifier on these images and labels
    for idx in range(FLAGS.training_steps):
        sess.run(train_op)
        if idx % 100 == 0:
            loss_val, accuracy_val = sess.run([loss, accuracy])
            print("loss: {}, accuracy: {}".format(loss_val, accuracy_val))
            saver.save(
                sess,
                os.path.join(FLAGS.checkpoint_dir, "fuzz_checkpoint"),
                global_step=idx,
            )
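The quantization example relies on weight_variable and bias_variable, which are likewise undefined above. The standard TF1 idiom for them (an assumption, not necessarily this project's exact definitions):

# Assumed weight/bias helpers, following the classic TF1 MNIST-tutorial idiom.
def weight_variable(shape):
    """Weights initialized with small truncated-normal noise."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    """Biases initialized to a small positive constant."""
    return tf.Variable(tf.constant(0.1, shape=shape))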