Exemplo n.º 1
0
def main():
    """Train a single-layer softmax classifier on MNIST and print test accuracy."""
    tf.compat.v1.disable_eager_execution()

    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # InteractiveSession installs itself as the default session, which is what
    # lets `train_step.run(...)` / `accuracy.eval(...)` work without an
    # explicit session argument below.
    sess = tfc.InteractiveSession()
    x = tfc.placeholder("float", shape=[None, 784])   # flattened 28x28 images
    y_ = tfc.placeholder("float", shape=[None, 10])   # one-hot labels
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    # Fixed: initialize_all_variables() is long deprecated;
    # global_variables_initializer() is the supported tf.compat.v1 spelling.
    sess.run(tfc.global_variables_initializer())
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    # NOTE(review): -sum(y_ * log(y)) is numerically unstable when y -> 0;
    # softmax_cross_entropy_with_logits would be safer, but the loss is kept
    # as-is to preserve the original training behavior.
    cross_entropy = -tf.reduce_sum(y_ * tfc.log(y))
    train_step = tfc.train.GradientDescentOptimizer(0.01).minimize(
        cross_entropy)
    for i in range(1000):
        batch = mnist.train.next_batch(50)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    # "测试集的正确率为" = "accuracy on the test set is"
    print(
        "测试集的正确率为",
        accuracy.eval(feed_dict={
            x: mnist.test.images,
            y_: mnist.test.labels
        }))
Exemplo n.º 2
0
def main():
    """Run the offline model over the MNIST test split and report accuracy."""
    acl_resource = AclResource()
    acl_resource.init()

    mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

    # Load the offline model from the configured path.
    model = Model(model_path)

    test_images = mnist.test.images
    test_labels = mnist.test.labels
    total = len(test_images)
    correct = 0.0

    start = time.time()
    for idx, (image, label) in enumerate(zip(test_images, test_labels)):
        output = model.execute([image])
        if np.argmax(output[0]) == np.argmax(label):
            correct += 1.0
        # Progress report every 1000 images.
        if idx % 1000 == 0:
            print(f'infer {idx+1} pics...')
    end = time.time()
    print(f'infer finished, acc is {correct/total}, use time {(end-start)*1000}ms')
Exemplo n.º 3
0
def mst():
    """Load the MNIST dataset and pull a first mini-batch from it."""
    mnist = input_data.read_data_sets("./ST_data/", one_hot=True)

    # Per-image feature rows (flattened pixels) and their one-hot targets;
    # indexing either array shows the raw data for one image.
    features = mnist.train.images
    labels = mnist.train.labels

    # Grab one batch of 50 (feature, target) pairs.
    batch = mnist.train.next_batch(50)
Exemplo n.º 4
0
def conv_fc():
    """Train the model returned by `model()` on MNIST with plain SGD."""
    # Dataset location on disk.
    data_dir = './2020-01-03/acsset'
    mnist = input_data.read_data_sets(data_dir, one_hot=True)

    # Build the graph: input placeholder, ground-truth and prediction tensors.
    x, y_true, y_pred = model()

    # Mean softmax cross-entropy between predictions and labels.
    with tf.compat.v1.variable_scope("sotf_cross"):
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=y_pred,
                                                    labels=y_true))

    # One SGD step (lr = 0.1) on that loss.
    with tf.compat.v1.variable_scope("optimizer"):
        train_op = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(
            loss)

    # Fraction of samples whose argmax prediction matches the label.
    with tf.compat.v1.variable_scope("acc"):
        hits = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_pred, 1))
        accuracy = tf.reduce_mean(tf.cast(hits, tf.float32))

    init_op = tf.compat.v1.global_variables_initializer()

    with tf.compat.v1.Session() as sess:
        sess.run(init_op)

        for step in range(100):
            # Fetch a real batch of features/targets.
            batch_x, batch_y = mnist.train.next_batch(50)
            feed = {x: batch_x, y_true: batch_y}

            sess.run(train_op, feed_dict=feed)

            # "训练第%d部, 准确率为%f" = "training step %d, accuracy %f"
            print("训练第%d部, 准确率为%f" %
                  (step, sess.run(accuracy, feed_dict=feed)))
Exemplo n.º 5
0
def main():
    """Train Lenet on MNIST on an Ascend NPU, then copy the checkpoint to OBS."""
    # Copy the training data from OBS down to the ModelArts local disk.
    mox.file.copy_parallel(src_url="obs://canncamps-hw38939615/MNIST_data/",
                           dst_url="MNIST_data")
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    config = tf.ConfigProto()
    custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
    custom_op.name = "NpuOptimizer"
    config.graph_options.rewrite_options.remapping = RewriterConfig.OFF  # remap must be disabled explicitly for the NPU
    sess = tf.Session(config=config)

    batch_size = cfg.BATCH_SIZE
    parameter_path = cfg.PARAMETER_FILE
    lenet = Lenet()
    max_iter = cfg.MAX_ITER

    saver = tf.train.Saver()
    if os.path.exists(parameter_path):
        # Fixed: Saver.restore() requires the session as its first argument;
        # the original `saver.restore(parameter_path)` raised a TypeError.
        saver.restore(sess, parameter_path)
    else:
        # Fixed: initialize_all_variables() is deprecated; use the supported
        # global_variables_initializer().
        sess.run(tf.global_variables_initializer())

    for i in range(max_iter):
        batch = mnist.train.next_batch(batch_size)
        if i % 100 == 0:
            # Periodic report of accuracy/loss on the current batch.
            train_accuracy, train_loss = sess.run(
                [lenet.train_accuracy, lenet.loss],
                feed_dict={
                    lenet.raw_input_image: batch[0],
                    lenet.raw_input_label: batch[1]
                })
            print("step %d, training accuracy %g, loss is %g" %
                  (i, train_accuracy, train_loss))
        sess.run(lenet.train_op,
                 feed_dict={
                     lenet.raw_input_image: batch[0],
                     lenet.raw_input_label: batch[1]
                 })
    save_path = saver.save(sess, parameter_path)
    print("save model in {}".format(save_path))
    # Copy the trained weights back up to OBS.
    mox.file.copy_parallel(src_url="checkpoint/",
                           dst_url="obs://canncamps-hw38939615/ckpt")
Exemplo n.º 6
0
def train():
    """Train the GAN on MNIST, periodically dumping sample images and checkpoints."""
    # Load MNIST (one-hot labels are unused by the GAN but kept for parity).
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

    x_data = tf.placeholder(tf.float32, [batch_size, img_size], name="x_data")
    z_prior = tf.placeholder(tf.float32, [batch_size, z_size], name="z_prior")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    global_step = tf.Variable(0, name="global_step", trainable=False)

    # Generator and discriminator graphs.
    x_generated, g_params = build_generator(z_prior)
    y_data, y_generated, d_params = build_discriminator(
        x_data, x_generated, keep_prob)

    # GAN losses.
    # NOTE(review): d_loss/g_loss are per-sample tensors, not reduced to a
    # scalar mean; minimize() then optimizes their implicit sum. Kept as-is.
    d_loss = -(tf.log(y_data) + tf.log(1 - y_generated))
    g_loss = -tf.log(y_generated)

    optimizer = tf.train.AdamOptimizer(0.0001)

    # One optimizer per sub-network, each restricted to its own variables.
    d_trainer = optimizer.minimize(d_loss, var_list=d_params)
    g_trainer = optimizer.minimize(g_loss, var_list=g_params)

    init = tf.global_variables_initializer()

    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(init)

    if to_restore:
        chkpt_fname = tf.train.latest_checkpoint(output_path)
        saver.restore(sess, chkpt_fname)
    else:
        if os.path.exists(output_path):
            shutil.rmtree(output_path)
        os.mkdir(output_path)

    # Fixed z sample reused every epoch so sample images are comparable.
    z_sample_val = np.random.normal(0, 1, size=(batch_size,
                                                z_size)).astype(np.float32)

    # Fixed: use integer floor division -- `60000 / batch_size` is a float in
    # Python 3, which made `j` a float (via np.arange) and the printed
    # iteration counter read e.g. "iter:0.0".
    steps = 60000 // batch_size
    # Hoisted, and simplified from `np.sum(0.7).astype(np.float32)`.
    dropout_keep = np.float32(0.7)
    for i in range(sess.run(global_step), max_epoch):
        for j in range(steps):
            print("epoch:%s, iter:%s" % (i, j))
            # Rescale pixel values from [0, 1] to [-1, 1].
            x_value, _ = mnist.train.next_batch(batch_size)
            x_value = 2 * x_value.astype(np.float32) - 1
            z_value = np.random.normal(0, 1, size=(batch_size,
                                                   z_size)).astype(np.float32)
            # Discriminator step.
            sess.run(d_trainer,
                     feed_dict={
                         x_data: x_value,
                         z_prior: z_value,
                         keep_prob: dropout_keep
                     })
            # Generator step (the original guarded this with the always-true
            # `if j % 1 == 0`, i.e. it already ran every iteration).
            sess.run(g_trainer,
                     feed_dict={
                         x_data: x_value,
                         z_prior: z_value,
                         keep_prob: dropout_keep
                     })
        # End of epoch: dump images from the fixed and a fresh random z.
        x_gen_val = sess.run(x_generated, feed_dict={z_prior: z_sample_val})
        show_result(x_gen_val, "output/sample{0}.jpg".format(i))
        z_random_sample_val = np.random.normal(
            0, 1, size=(batch_size, z_size)).astype(np.float32)
        x_gen_val = sess.run(x_generated,
                             feed_dict={z_prior: z_random_sample_val})
        show_result(x_gen_val, "output/random_sample{0}.jpg".format(i))
        sess.run(tf.assign(global_step, i + 1))
        saver.save(sess,
                   os.path.join(output_path, "model"),
                   global_step=global_step)
Exemplo n.º 7
0
from __future__ import print_function

import numpy as np
# import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Import MNIST data
from tensorflow_core.examples.tutorials.mnist import input_data

# Number of nearest neighbours to consider per test sample.
K = 4
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# In this example, we limit mnist data
Xtr, Ytr = mnist.train.next_batch(55000)  # whole training set
Xte, Yte = mnist.test.next_batch(100)  # whole test set

# tf Graph Input
xtr = tf.placeholder("float", [None, 784])  # all training images
ytr = tf.placeholder("float", [None, 10])  # their one-hot labels
xte = tf.placeholder("float", [784])  # one test image, compared against all of xtr

# Euclidean distance, negated so that tf.nn.top_k (which returns the LARGEST
# values) yields the nearest (smallest-distance) neighbours.
distance = tf.negative(
    tf.sqrt(
        tf.reduce_sum(tf.square(tf.subtract(xtr, xte)), reduction_indices=1)))
# Prediction: Get min distance neighbors
values, indices = tf.nn.top_k(distance, k=K, sorted=False)

# Accumulator for the K neighbours (filled by the loop that follows).
nearest_neighbors = []
for i in range(K):
Exemplo n.º 8
0
    def reduce(self, n_dimensions=None, keep_info=None):
        """Project the data onto its leading principal components.

        Pass `n_dimensions` directly, or pass `keep_info` (the fraction of
        total singular-value mass to retain) and let the dimension count be
        derived from the cumulative spectrum.
        """
        if keep_info:
            # Cumulative share of the total singular-value mass.
            shares = self._singular_values / sum(self._singular_values)
            cumulative = np.cumsum(shares)
            # Length of the smallest prefix whose share reaches keep_info.
            n_dimensions = 1 + next(idx
                                    for idx, share in enumerate(cumulative)
                                    if share >= keep_info)
        with self._graph.as_default():
            # Keep only the first n_dimensions columns of sigma.
            sigma_cut = tf.slice(self._sigma, [0, 0],
                                 [self._data.shape[1], n_dimensions])
            projected = tf.matmul(self._u, sigma_cut)

        with tf.Session(graph=self._graph) as session:
            return session.run(projected, feed_dict={self._X: self._data})


# Fit PCA on the raw MNIST training images, keeping only enough components
# to cover 10% of the retained information ("保存的信息占比").
mnist_ds = input_data.read_data_sets('MNIST_data/')

tf_pca = TF_PCA(mnist_ds.train.images)
tf_pca.fit()
pca = tf_pca.reduce(keep_info=0.1)
print('original data shape', mnist_ds.train.images.shape)
print('reduced data shape ', pca.shape)

# One colour per digit class, mapped over the integer training labels.
palette = sns.color_palette('Set2', 10)
colour_of = dict(enumerate(palette))
colors = [colour_of[label] for label in mnist_ds.train.labels]

# 3-D scatter of the first three principal components.
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(pca[:, 0], pca[:, 1], pca[:, 2], c=colors)
plt.show()
Exemplo n.º 9
0
            self.tensors.x: xs,
            self.tensors.y: ys
        })
        return precise

    def save(self):
        """Persist the current session's variables to `self.save_path`."""
        self.saver.save(self.session, self.save_path)

    def close(self):
        """Release the TensorFlow session owned by this app."""
        self.session.close()

    def __enter__(self):
        """Context-manager entry; returns the app itself."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: close the session (exceptions still propagate)."""
        self.close()


if __name__ == '__main__':
    path = "MNIST_data"
    ds = read_data_sets(path)
    print(ds.train.num_examples)  # number of training examples
    print(ds.test.num_examples)  # number of test examples
    print(ds.validation.num_examples)  # number of validation examples

    save_path = "models/p31/mnist"
    app = MNISTApp(save_path)
    # Run train then predict inside the app's context (session closed on exit).
    with app:
        app.train(ds)
        app.predict(ds)
Exemplo n.º 10
0
        self.saver.save(self.session, self.config.save_path)

    def predict(self, da):
        """Evaluate precision over the test split, batch by batch, and print the mean."""
        batch_size = self.config.batch_size
        num_batches = da.test.num_examples // batch_size
        total = 0
        for _ in range(num_batches):
            xs, ys = da.test.next_batch(batch_size)
            total += self.session.run(self.ts.precise, {
                self.ts.xs: xs,
                self.ts.ys: ys
            })
        # Mean precision across all full batches.
        print(f"precise = {total / num_batches}")

    def __enter__(self):
        """Context-manager entry; returns the app itself."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: close the TensorFlow session."""
        self.session.close()


if __name__ == '__main__':
    # Run in TF1 graph mode (no eager execution).
    tf.disable_eager_execution()

    config = Config()
    app = App(config)
    da = read_data_sets(config.data_path)
    with app:
        app.train(da)
        app.predict(da)
Exemplo n.º 11
0
# knn
# For each test sample, find the K closest training samples.
# Among those k (e.g. 100) neighbours, the majority label wins
# (e.g. if 50 of them are "1", predict "1").

import tensorflow as tf
import numpy as np
import random
from tensorflow_core.examples.tutorials.mnist import input_data

# Load the data:
# input_data.read_data_sets('<path>', one_hot=bool)
# one_hot=True encodes each label as a 0/1 indicator vector.
fileName = '/Users/liupeng/PycharmProjects/MachineLearning/2020-01-01/acsset'
mnist = input_data.read_data_sets(fileName, one_hot=True)

# Configuration.
# Number of images in the training set.
trainNumber = 55000
# Number of images in the test set.
testNumber = 10000
# Number of training images actually used.
trainSize = 5000
# Number of test images actually used.
testSize = 5

# Neighbour count for KNN.
k = 4

# Data split:
# sample trainSize indices from [0, trainNumber) without replacement.
Exemplo n.º 12
0
#  Imports.
import tensorflow as tf
from tensorflow_core.examples.tutorials.mnist import input_data
from tensorflow.python.framework import graph_util
from npu_bridge.npu_init import *


# Load the MNIST dataset (one-hot labels).
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Size of each mini-batch.
batch_size = 100
# Number of batches per epoch.
n_batch = mnist.train.num_examples // batch_size

# Name of the graph's output node and path of the frozen-graph (.pb) file.
output_node_names = "Sigmoid"
output_graph = '/home/ma-user/AscendProjects/MyTraining/models/lenet.pb'

def config():
    """Build a tf.ConfigProto set up for training on an Ascend NPU."""
    session_cfg = tf.ConfigProto(allow_soft_placement=True,
                                 log_device_placement=False)
    rewrite_opts = session_cfg.graph_options.rewrite_options
    npu_opt = rewrite_opts.custom_optimizers.add()
    npu_opt.name = "NpuOptimizer"
    # use_off_line=True runs the training on the Ascend AI processor.
    npu_opt.parameter_map["use_off_line"].b = True
    # Both rewrites must be switched off explicitly for the NPU backend.
    rewrite_opts.remapping = RewriterConfig.OFF
    rewrite_opts.memory_optimization = RewriterConfig.OFF
    return session_cfg

#定义初始化函数
# 初始化权值
def weight_variable(shape):
Exemplo n.º 13
0
    def get_feed_dict(self, ds):
        """Pair each input tensor with the matching array of the next batch (xs, ys)."""
        batch = ds.next_batch(self.config.batch_size)
        return dict(zip(self.ts.inputs, batch))

    def save(self):
        """Write a checkpoint of the current session to the configured path."""
        self.saver.save(self.session, save_path=self.config.save_path)

    def test(self, ds_test):
        """Hook for evaluating on the test split; not implemented yet."""
        pass

    def close(self):
        """Close the underlying TensorFlow session."""
        self.session.close()

    def __enter__(self):
        """Context-manager entry; returns the app itself."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: close the session (exceptions still propagate)."""
        self.close()


if __name__ == '__main__':
    cfg = Config()
    cfg.from_cmd()  # presumably applies command-line overrides -- TODO confirm
    print(cfg)

    ds = read_data_sets(cfg.simple_path)
    app = cfg.get_app()
    with app:
        app.train(ds_train=ds.train, ds_validation=ds.validation)
        app.test(ds_test=ds.test)
Exemplo n.º 14
0
def full_connected():
    """Train a one-layer fully-connected MNIST classifier, logging to TensorBoard."""

    mnist = input_data.read_data_sets("./MNIST_data/", one_hot=True)

    # 1. Placeholders for the input data.
    with tf.variable_scope("data"):
        x = tf.placeholder(tf.float32, [None, 784])
        y_true = tf.placeholder(tf.int32, [None, 10])

    # 2. A single fully-connected layer.
    with tf.variable_scope("fc_model"):
        # Randomly initialised weights and a zero-initialised bias.
        weight = tf.Variable(tf.random_normal([784, 10], mean=0.0, stddev=1.0),
                             name="w")

        bias = tf.Variable(tf.constant(0.0, shape=[10]))

        # Predicted logits: [None, 784] x [784, 10] + [10] = [None, 10]
        y_predict = tf.matmul(x, weight) + bias

    # 3. Mean cross-entropy loss over all samples.
    with tf.variable_scope("soft_cross"):
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y_true,
                                                    logits=y_predict))

    # 4. Gradient-descent training step.
    with tf.variable_scope("optimizer"):
        train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    # 5. Accuracy: fraction of samples whose argmax matches the label.
    with tf.variable_scope("acc"):
        equal_list = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_predict, 1))

        # equal_list is a [None] vector of 0/1 values, e.g. [1, 0, 0, 1, 1, ...]
        accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))

    # Scalar summaries.
    tf.summary.scalar("losses", loss)
    tf.summary.scalar("acc", accuracy)

    # Histogram summaries for the high-dimensional variables.
    tf.summary.histogram("weights", weight)
    tf.summary.histogram("biases", bias)

    # Variable-initialisation op.
    init_op = tf.global_variables_initializer()

    # Op that merges all the summaries declared above.
    merged = tf.summary.merge_all()

    # Run the training loop.
    with tf.compat.v1.Session() as sess:
        sess.run(init_op)

        # Events file for TensorBoard.
        file_writer = tf.summary.FileWriter("./tmp/", graph=sess.graph)

        for i in range(2000):
            # Fetch the next batch of real features/targets.
            mnist_x, mnist_y = mnist.train.next_batch(50)

            sess.run(train_op, feed_dict={x: mnist_x, y_true: mnist_y})

            # Record this step's summaries (note: a second forward pass).
            summary = sess.run(merged, feed_dict={x: mnist_x, y_true: mnist_y})

            file_writer.add_summary(summary, i)

            # "训练第{}步, 准确率为:{}" = "training step {}, accuracy: {}"
            print("训练第{}步, 准确率为:{}".format(
                i, sess.run(accuracy, feed_dict={
                    x: mnist_x,
                    y_true: mnist_y
                })))
    # NOTE(review): everything below references names (app, sample, col, path,
    # cv2, np) that are never defined in this function -- it looks like a
    # fragment from another snippet was pasted here and would raise NameError
    # if reached. Left untouched pending confirmation.
    mean = app.session.run(app.ts.final_mean)
    print(mean)
    msd = app.session.run(app.ts.final_msd)
    print(msd)
    std = np.sqrt(msd - mean**2)
    print(std)

    vec = np.random.normal(mean, std, [sample, len(std)])
    images = app.session.run(app.ts.y, {app.ts.vec: vec})  # [-1, 28, 28]
    # Tile all generated images into one big image, `col` small images per row.
    images = np.reshape(images, [-1, col, 28, 28])  # [-1, 20, 28, 28]
    images = np.transpose(images, [0, 2, 1, 3])  # [-1, 28, 20, 28]
    images = np.reshape(images, [-1, col * 28])  # [-1, 28, 20 * 28]
    # images = np.transpose(images, [2, 0, 1]) # [20 * 28, -1, 28]
    # images = np.reshape(images, [col * 28, -1]) # [20 * 28, -1]
    # images = np.transpose(images, [1, 0]) # [-1, 20 * 28]
    cv2.imwrite(path, images * 255)


if __name__ == '__main__':
    # TF1-style graph execution on a clean default graph.
    tf.disable_eager_execution()
    tf.reset_default_graph()

    config = MyConfig()
    ds = read_data_sets(config.simple_path)
    app = myf.App(config)
    with app:
        app.train(ds_train=MyDS(ds.train, config),
                  ds_validation=MyDS(ds.validation, config))
        # predict(app, config.batch_size, config.image_path, config.col)
Exemplo n.º 16
0
    def mnist():
        """Train softmax regression on MNIST, report test accuracy, show one sample."""
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

        nb_classes = 10

        # Inputs: flattened 28x28 images and one-hot labels.
        X = tf.placeholder(tf.float32, [None, 784])
        Y = tf.placeholder(tf.float32, [None, nb_classes])

        W = tf.Variable(tf.random_normal([784, nb_classes]))
        b = tf.Variable(tf.random_normal([nb_classes]))

        # Softmax over the linear scores.
        hypothesis = tf.nn.softmax(tf.matmul(X, W) + b)

        # Cross-entropy loss, averaged over the batch.
        cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))
        train = tf.train.GradientDescentOptimizer(
            learning_rate=0.1).minimize(cost)

        # Accuracy: fraction of argmax matches between prediction and label.
        is_correct = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
        accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))

        num_epochs = 15
        batch_size = 100
        num_iterations = int(mnist.train.num_examples / batch_size)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for epoch in range(num_epochs):
                avg_cost = 0

                for i in range(num_iterations):
                    batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                    _, cost_val = sess.run([train, cost],
                                           feed_dict={
                                               X: batch_xs,
                                               Y: batch_ys
                                           })
                    # Running mean of the per-iteration cost.
                    avg_cost += cost_val / num_iterations

                print("Epoch: {:04d}, Cost: {:.9f}".format(
                    epoch + 1, avg_cost))

            print("Learning finished")

            print(
                "Accuracy: ",
                accuracy.eval(session=sess,
                              feed_dict={
                                  X: mnist.test.images,
                                  Y: mnist.test.labels
                              }),
            )

            # Spot-check one random test sample: true label vs prediction.
            r = random.randint(0, mnist.test.num_examples - 1)
            print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1],
                                                1)))
            print(
                "Prediction: ",
                sess.run(tf.argmax(hypothesis, 1),
                         feed_dict={X: mnist.test.images[r:r + 1]}),
            )

            # Display the sampled image.
            plt.imshow(
                mnist.test.images[r:r + 1].reshape(28, 28),
                cmap="Greys",
                interpolation="nearest",
            )
            plt.show()
Exemplo n.º 17
0
#!/usr/bin/python3.6
"""
@Author: xiaxianyi<*****@*****.**>
@Time: 2021/4/5 9:11
@File: test.py
Description:
"""
import tensorflow as tf
from Inference import inference
import tensorflow_core.examples.tutorials.mnist.input_data as input_data

if __name__ == '__main__':
    # Evaluate the trained model on the MNIST test split.
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
    infer = inference()
    test_data = mnist.test
    # Fixed: use the public num_examples property instead of the private
    # _num_examples attribute.
    num_examples = test_data.num_examples
    count = 0
    for i in range(num_examples):
        # Fixed: this loop claimed to measure TEST accuracy but drew its
        # batches from the TRAINING split (mnist.train.next_batch).
        batch = test_data.next_batch(1)
        pred = infer.predict(batch[0])
        label = batch[1]
        if pred[0] == label[0]:
            count += 1
    # Fixed: divide by the number of examples actually evaluated rather than
    # a hard-coded 10000.
    print("test data accuracy: ", count / num_examples)
Exemplo n.º 18
0
import tensorflow as tf
from tensorflow_core.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
batch_size = 200
# Number of batches per epoch.
n_batch = mnist.train.num_examples // batch_size

# Inputs: flattened 28x28 images, one-hot labels, and a dropout keep
# probability fed at run time.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)
# Learning rate held in a Variable (presumably so later code can decay it --
# TODO confirm against the rest of the snippet).
lr = tf.Variable(0.001, dtype=tf.float32)

"""
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(x, W)+b)
"""

# Three-layer MLP: 784 -> 500 -> 250 -> 10, tanh activations with dropout
# after each hidden layer, softmax on the output.
W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))
b1 = tf.Variable(tf.zeros([500])+0.1)
prediction1 = tf.nn.tanh(tf.matmul(x, W1)+b1)
prediction1_drop = tf.nn.dropout(prediction1, keep_prob)

W2 = tf.Variable(tf.truncated_normal([500, 250], stddev=0.1))
b2 = tf.Variable(tf.zeros([250])+0.1)
prediction2 = tf.nn.tanh(tf.matmul(prediction1_drop, W2)+b2)
prediction2_drop = tf.nn.dropout(prediction2, keep_prob)

W3 = tf.Variable(tf.truncated_normal([250, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10])+0.1)
prediction = tf.nn.softmax(tf.matmul(prediction2_drop, W3)+b3)
Exemplo n.º 19
0
import tensorflow as tf
import numpy as np
from scipy import stats
from tensorflow_core.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("./data", one_hot=True)

# Fixed reference sets: 5000 training rows and 100 test rows.
X_train, y_train = mnist.train.next_batch(5000)  # [5000,784],[5000,10]
X_test, y_test = mnist.test.next_batch(100)  # [100,784], [100,10]

# Neighbour count for KNN.
k = 3

with tf.name_scope("Inputs"):
    target_x = tf.placeholder("float", [1, 784],
                              name='test_x')  # target vector
    X = tf.placeholder(
        "float", [None, 784])  # matrix of observations to compare to target
    y = tf.placeholder("float", [None, 10])  # matrix of one-hot class vectors

with tf.name_scope("Distance_layer"):
    with tf.name_scope("L1-distace"):
        # Manhattan (L1) distance: row-wise sum of absolute differences.
        # (The original comment called this "euclidean"; it is L1.)
        l1_dist = tf.reduce_sum(
            tf.abs(tf.subtract(X, target_x)), 1
        )
    with tf.name_scope("L2-distance"):
        # Euclidean (L2) distance: sqrt of the row-wise sum of squared
        # differences between each row of X and the target vector.
        l2_dist = tf.sqrt(
            tf.reduce_sum(tf.square(tf.subtract(X, target_x)), 1)
        )

    # nn = tf.argmin(l1_dist, 0)
    with tf.name_scope("k-nearest"):