def linear_regression():
    """
    A simple linear regression model
    :return:
    """
    X, Y, w, b = get_model()
    loss = get_loss(X, Y, w, b)
    optimizer = get_optimizer(loss)

    linear_ops = tf.global_variables_initializer()

    X_train, Y_train, n_samples = loaddata()
    total = []
    with tf.Session() as sess:
        sess.run(linear_ops)
        writer = tf.summary.FileWriter("linear_regression", sess.graph)
        for i in range(100):
            total_loss = 0
            for x, y in zip(X_train, Y_train):
                _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
                total_loss += l
            total.append(total_loss / n_samples)
            logger.info("epoll {0} loss {1}".format(i, total_loss / n_samples))
        writer.close()
        w_val, b_val = sess.run([w, b])
        logger.info("w {0},b {1}".format(w_val, b_val))

        show_data(X_train, Y_train, w_val, b_val)

        # Plot the loss curve
        show_loss(total)
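The helpers get_model, get_loss, get_optimizer, and loaddata are not shown in this snippet; the sketch below is one plausible set of definitions for a one-feature model Y = w * X + b fed one sample at a time (the names match the snippet, the bodies are assumptions, not the original implementations):

def get_model():
    # Scalar placeholders: one sample is fed per training step.
    X = tf.placeholder(tf.float32, name="X")
    Y = tf.placeholder(tf.float32, name="Y")
    w = tf.Variable(0.0, name="w")
    b = tf.Variable(0.0, name="b")
    return X, Y, w, b

def get_loss(X, Y, w, b):
    # Squared error of the linear prediction.
    return tf.square(Y - (X * w + b), name="loss")

def get_optimizer(loss):
    return tf.train.GradientDescentOptimizer(0.001).minimize(loss)

def loaddata():
    # Noisy points around a line, as a stand-in for the real data source.
    n_samples = 100
    X = np.linspace(0, 10, n_samples)
    Y = 2.0 * X + 1.0 + np.random.normal(scale=1.0, size=n_samples)
    return X, Y, n_samples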
Example #2
def house_price():
    """
    Estimate house prices with linear regression
    :return:
    """

    # Features
    X = nd.array([[120, 2], [100, 1], [130, 3]])
    logger.info(nd.norm(X, axis=0))
    # Labels
    labels = nd.array([130, 98, 140])
    logger.info(nd.norm(labels, axis=0))

    # Weights and bias
    w = nd.random.normal(scale=0.01, shape=(2, 1))
    b = nd.zeros(shape=(1, ))

    w.attach_grad()
    b.attach_grad()

    for i in range(5):
        for x, y in data_iter(10, X, labels):
            with autograd.record():
                l = squared_loss(linreg(x, w, b), y)
                logger.info(l.mean().asnumpy())
            l.backward()
            sgd([w, b], 0.02, 10)

    logger.info(w)
    logger.info(b)
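data_iter, linreg, squared_loss, and sgd are assumed by this snippet. A minimal sketch in the style of the d2l (Dive into Deep Learning) helpers:

import random

def data_iter(batch_size, features, labels):
    # Yield random minibatches of (features, labels).
    indices = list(range(len(features)))
    random.shuffle(indices)
    for i in range(0, len(features), batch_size):
        j = nd.array(indices[i:min(i + batch_size, len(features))])
        yield features.take(j), labels.take(j)

def linreg(X, w, b):
    return nd.dot(X, w) + b

def squared_loss(y_hat, y):
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

def sgd(params, lr, batch_size):
    # In-place SGD step; gradients were accumulated over the minibatch.
    for param in params:
        param[:] = param - lr * param.grad / batch_size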
Example #3
def mnist_dnn():
    """
    MNIST handwritten digit recognition with Keras
    :return:
    """
    mnist = keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data(os.path.join(root_path, "data", "mnist", "mnist.npz"))
    x_train, x_test = x_train / 255.0, x_test / 255.0

    model = keras.models.Sequential([
        keras.layers.Flatten(input_shape=(28, 28)),
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(10, activation='softmax')
    ])
    model.summary()

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'], )

    model.fit(x_train, y_train, epochs=5, verbose=1, validation_split=0.2)

    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=1)

    logger.info('\nTest loss: {0}, Test accuracy: {1}'.format(test_loss, test_acc))
Example #4
def plt_perceptron():
    # Decision function: the sign of W·x + b gives the predicted class
    def sign(X, W, b):
        return np.dot(W, X) + b

    # One pass over the dataset, updating on misclassified samples
    def train(X, Y, W, b, learning_rate=0.1):
        for i in range(len(X)):
            if (Y[i] * sign(X[i], W, b) <= 0):
                W = W + learning_rate * Y[i] * X[i]
                b = b + learning_rate * Y[i]
        return W, b

    # Run 1000 passes over the dataset, logging W and b every 100 passes
    W = np.zeros([1, 2])
    b = 0
    X, Y = get_train_data()
    for i in range(1000):
        W, b = train(X, Y, W, b)
        if (i % 100 == 0): logger.info("count={0} w={1} b={2}".format(i, W, b))

    x_ = np.linspace(4, 7, 10)
    y_ = -(W[0][0] * x_ + b) / W[0][1]
    plt.plot(x_, y_)
    plt.scatter(X[:50, 0], X[:50, 1], label='0')
    plt.scatter(X[50:100, 0], X[50:100, 1], label='1')
    plt.xlabel('sepal length')
    plt.ylabel('sepal width')
    plt.legend()
    plt.show()
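get_train_data is not shown; given the sepal length/width axes and the 50/50 scatter split, it plausibly returns the first 100 iris samples with labels mapped to ±1. A hypothetical sketch:

def get_train_data():
    # First two classes of iris, two features, labels in {-1, +1}
    # so that Y[i] * sign(X[i], W, b) <= 0 flags a misclassification.
    iris = datasets.load_iris()
    X = iris.data[:100, :2]
    Y = np.where(iris.target[:100] == 0, -1, 1)
    return X, Y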
Example #5
def mnist_dnn2():
    mnist = keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data(os.path.join(root_path, "data", "mnist", "mnist.npz"))
    x_train, x_test = x_train / 255.0, x_test / 255.0

    x_train = np.reshape(x_train, [-1, 28, 28])
    x_test = np.reshape(x_test, [-1, 28, 28])
    y_train = np_utils.to_categorical(y_train, 10)
    y_test = np_utils.to_categorical(y_test, 10)

    model = keras.models.Sequential()
    model.add(keras.layers.Dense(128, input_shape=(28, 28)))
    model.add(keras.layers.Activation("relu"))
    model.add(keras.layers.Dropout(0.2))

    model.add(keras.layers.Dense(64))
    model.add(keras.layers.Activation("relu"))
    model.add(keras.layers.Dropout(0.2))

    # model.add(keras.layers.Flatten())
    model.add(keras.layers.Reshape((-1,)))
    model.add(keras.layers.Dense(10))
    model.add(keras.layers.Activation("softmax"))

    model.summary()

    model.compile(optimizer=keras.optimizers.RMSprop(lr=0.001), loss="categorical_crossentropy", metrics=["accuracy"])

    model.fit(x_train, y_train, batch_size=32, epochs=10, verbose=1, validation_split=0.2)

    test_loss, test_accuracy = model.evaluate(x_test, y_test, batch_size=32, verbose=1)
    logger.info("\ntest_loss:{0},test_accuracy:{1}".format(test_loss, test_accuracy))
Example #6
def kmean_iris_distances(epochs=10):
    """
    Sum of squared errors (SSE): as the number of clusters k grows, SSE decreases, so plotting SSE against k helps pick k at the "elbow"
    :return:
    """
    iris = datasets.load_iris()
    X, Y = iris.data[:, :2], iris.target

    def input_fn():
        return tf.constant(np.array(X), tf.float32, X.shape), None

    distances = []
    for k in range(1, epochs):
        kmeans = KMeansClustering(num_clusters=k,
                                  relative_tolerance=0.0001,
                                  random_seed=2)
        kmeans.fit(input_fn=input_fn)

        sum_distances = kmeans.score(input_fn=input_fn, steps=100)
        logger.info(sum_distances)
        distances.append(sum_distances)

    plt.plot(list(range(1, epochs)), distances)
    plt.xlabel("epoch")
    plt.ylabel("distance")
    plt.show()
Example #7
def keras_imdb_gru(num_words=None, maxlen=None, embedding_dim=128):
    """
    Binary sentiment classification of IMDB reviews using a GRU (gated recurrent unit)
    :return:
    """
    (x_train, y_train), (x_test, y_test) = imdb.load_data(
        os.path.join(root_path, "data", "imdb", "imdb.npz"), num_words=num_words)

    if not num_words: num_words = max(max([max(x) for x in x_train]), max([max(x) for x in x_test])) + 1
    if not maxlen: maxlen = max(max([len(x) for x in x_train]), max([len(x) for x in x_test])) + 1

    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

    model = keras.models.Sequential()
    model.add(keras.layers.Embedding(num_words + 1, embedding_dim, input_length=maxlen))
    model.add(keras.layers.GRU(128))
    model.add(keras.layers.Dense(1, activation='sigmoid'))

    model.summary()
    model.compile(optimizer="rmsprop", loss='binary_crossentropy', metrics=['accuracy'])

    history = model.fit(x_train, y_train, epochs=10, batch_size=64, validation_split=0.2, verbose=1)
    keras_history_plotcurve(history)

    # Evaluate the model
    test_loss, test_accuracy = model.evaluate(x_test, y_test, batch_size=64, verbose=0)
    logger.info("\ntest_loss:{0},test_accuracy:{1}".format(test_loss, test_accuracy))
Example #8
def leave_one_out(X_train,
                  y_train,
                  num_epochs,
                  learning_rate,
                  weight_decay,
                  batch_size,
                  net=None):
    """
    Leave-One-Out is the special case of S-fold cross-validation with S = N, where N is the size of the dataset. It is often quite accurate but computationally expensive: a dataset of 100,000 samples requires training 100,000 models.
    :return:
    """
    logger.info("开始留一发模型校验")
    train_l_sum, valid_l_sum = 0, 0

    # Number of samples, which equals the number of folds in leave-one-out
    k = len(X_train)
    if not net: net = get_net()
    for i in range(k):
        data = get_k_fold_data(k, i, X_train, y_train)
        train_ls, valid_ls = train(net, *data, num_epochs, learning_rate,
                                   weight_decay, batch_size)
        train_l_sum += train_ls[-1]
        valid_l_sum += valid_ls[-1]
        logger.info('fold %d, train rmse %f, valid rmse %f' %
                    (i, train_ls[-1], valid_ls[-1]))
    return train_l_sum / k, valid_l_sum / k
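get_k_fold_data is assumed; one common definition (d2l-style, with MXNet NDArrays) takes fold i as the validation split and concatenates the remaining folds for training:

def get_k_fold_data(k, i, X, y):
    # Fold i is the validation split; the other k-1 folds form the training split.
    fold_size = X.shape[0] // k
    X_train, y_train = None, None
    for j in range(k):
        idx = slice(j * fold_size, (j + 1) * fold_size)
        X_part, y_part = X[idx, :], y[idx]
        if j == i:
            X_valid, y_valid = X_part, y_part
        elif X_train is None:
            X_train, y_train = X_part, y_part
        else:
            X_train = nd.concat(X_train, X_part, dim=0)
            y_train = nd.concat(y_train, y_part, dim=0)
    return X_train, y_train, X_valid, y_valid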
Example #9
def control_grad():
    """
    Gradients can be computed even when the function's computation graph contains Python control flow (conditionals and loops)
    :return:
    """
    def f(a):
        """
        f is piecewise linear in its input: f(a) = k * a for some scalar k that depends on a
        :param a:
        :return:
        """
        b = a * 2
        while b.norm().asscalar() < 1000:
            b = b * 2
        if b.sum().asscalar() > 0:
            c = b
        else:
            c = 100 * b
        return c

    a = nd.random.normal(shape=1)
    logger.info("autograd a:")
    logger.info(a)

    a.attach_grad()
    with autograd.record():
        c = f(a)
        logger.info(c)
    c.backward()

    logger.info("autograd a.grad:")
    logger.info(a.grad)
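    # Sanity check: since f(a) = k * a for some scalar k (see the docstring),
    # the gradient should equal c / a elementwise.
    logger.info(a.grad == c / a)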
Example #10
def restore():
    v = tf.Variable(tf.random_gamma(shape=(3, 3), alpha=10))
    initial_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(initial_op)
        saver = tf.train.Saver()
        saver.restore(sess, save_path="./saver")
        # Log the restored value, not the Variable object
        logger.info(sess.run(v))
Example #11
def linspace():
    """
    Evenly spaced values over an interval
    :return:
    """
    l = tf.linspace(1., 10., 100)
    with tf.Session() as sess:
        logger.info(sess.run(l))
Example #12
def classify_image(image_path, model):
    img = image_load_and_convert(image_path, model)
    Network = MODELS[model][0]
    model = Network(weights="imagenet")
    predicts = model.predict(img)
    p = imagenet_utils.decode_predictions(predicts)
    for i, (imagenetId, label, prob) in enumerate(p[0]):
        logger.info("{0} : {1}: {2}".format(i, label, prob * 100))
Example #13
def tensorboard():
    """
    Launch TensorBoard
    :return:
    """
    # os.popen returns a single pipe object, not a (stdin, stdout) pair
    stdout = os.popen("tensorboard --logdir linear_regression")
    for line in stdout.readlines():
        logger.info(line)
Example #14
def interactive_add():
    sess = tf.InteractiveSession()
    a = tf.constant(1, np.float32)
    b = tf.constant(2, np.float32)

    c = tf.add(a, b)
    logger.info(c.eval())
    logger.info(a.eval())
    sess.close()
Example #15
def multiple_perceptron_mnist(epochs=10, batch_size=1000, learning_rate=0.01, hidden=30):
    """
    MNIST handwritten digit recognition (note: the data path below points at the fashionMNIST directory).
    The input layer is called layer zero because it only buffers the input. The only other layer of neurons forms the output layer, and each output neuron has its own weights and threshold. When there are many such layers, the network is called a multilayer perceptron (MLP). An MLP has one or more hidden layers, each with its own number of hidden neurons; the neurons within a hidden layer share the same activation function.
    An MLP is also known as a fully connected network. Information flows from input to output, with no feedback or skip connections, so these networks are also called feedforward networks.
    :return:
    """
    mnist_path = os.path.join(root_path, "data", "fashionMNIST")
    mnist_data = input_data.read_data_sets(mnist_path, one_hot=True)
    train_data = mnist_data.train
    test_data = mnist_data.test

    sample_count = train_data.num_examples  # 55000
    feature_count = train_data.images.shape[1]  # 784

    label_count = train_data.labels.shape[1]  # 10

    X = tf.placeholder(dtype=tf.float32, shape=(None, feature_count), name="X")
    Y = tf.placeholder(dtype=tf.float32, shape=(None, label_count), name="Y")

    with tf.name_scope("layer"):
        layer1 = layers.fully_connected(X, hidden, activation_fn=tf.nn.relu, scope="layer1")
        layer2 = layers.fully_connected(layer1, 256, activation_fn=tf.nn.relu, scope="layer2")
        Y_hat = layers.fully_connected(layer2, label_count, activation_fn=None, scope="layerout")
    with tf.name_scope("cross_entropy"):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=Y_hat, name="loss"))
        tf.summary.scalar("cross_entropy", loss)
    with tf.name_scope("train"):
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)

    init = tf.global_variables_initializer()
    summary_ops = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(init)
        writer = tf.summary.FileWriter("multiple_perceptron", graph=sess.graph)
        total = []
        for i in range(epochs):
            total_loss = 0
            batch_count = sample_count // batch_size
            for j in range(batch_count):
                batch_trains, batch_labels = mnist_data.train.next_batch(batch_size)
                _, l, summary_str = sess.run([optimizer, loss, summary_ops],
                                             feed_dict={X: batch_trains, Y: batch_labels})
                writer.add_summary(summary_str, i * batch_size + j)
                total_loss += l
            logger.info("epoll {0} loss {1}".format(i, total_loss / batch_count))
            total.append(total_loss / batch_count)
        writer.close()

        # Model evaluation
        correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_hat, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
        logger.info("accuracy %s" % sess.run(accuracy, feed_dict={X: test_data.images, Y: test_data.labels}))

        plt.plot(total)
        plt.show()
Example #16
async def hello(greeting: str = "Hello",
                name: str = "World") -> Dict[str, str]:
    """
    Returns greeting message.
    """
    message = f"{greeting} {name}!"
    response = {"message": message}
    logger.info("hello response", extra={"response": response})
    return response
Example #17
def super_variables(epochs=10, min_loss=0.01, batch_size=200, hidden=30):
    """
    Hyperparameters: train repeatedly and keep the model whose test RMSE drops below min_loss
    :return:
    """

    boston_data = datasets.load_boston()
    X_train, X_test, y_train, y_test = train_test_split(boston_data.data,
                                                        boston_data.target,
                                                        test_size=0.3,
                                                        random_state=42)
    # Normalize the data: fit the scalers on the training set only,
    # then apply the same transform to the test set
    x_scaler = MinMaxScaler()
    X_train = x_scaler.fit_transform(X_train)
    X_test = x_scaler.transform(X_test)
    y_scaler = MinMaxScaler()
    y_train = y_scaler.fit_transform(np.reshape(y_train, newshape=(-1, 1)))
    y_test = y_scaler.transform(np.reshape(y_test, newshape=(-1, 1)))

    n_samples, n_feature = X_train.shape

    model = Sequential()
    model.add(Dense(hidden, input_dim=n_feature, activation="relu"))
    model.add(Dense(1, activation="sigmoid"))
    model.summary()
    model_json = "{}"
    with tf.Session() as sess:
        # saver = tf.train.Saver()
        # save_path = saver.save(sess, "model.ckpt")
        # logger.info("Model saved in %s" % save_path)
        for epoch in range(epochs):
            batch_count = len(X_train) // batch_size
            for i in range(batch_count):
                model.compile(
                    optimizer="adam",
                    loss="mean_squared_error",
                )
                model.fit(X_train,
                          y_train,
                          validation_data=(X_test, y_test),
                          epochs=epochs,
                          batch_size=batch_size,
                          verbose=1)
                y_test_pred = model.predict(X_test)
                # Compute RMSE directly with numpy; the previous .eval() call
                # fails for a plain float and the value was MSE, not RMSE
                rmse = np.sqrt(np.mean((np.reshape(y_test, (-1, )) -
                                        np.reshape(y_test_pred, (-1, ))) ** 2))
                logger.info(rmse)
                if rmse < min_loss:
                    min_loss = rmse
                    model_json = model.to_json()
        with open("model.json", "w") as json_file:
            json_file.write(model_json)
            model.save_weights("model.hdf5")
            logger.info("Saved model to disk")
Example #18
def normal():
    """
    Each element is sampled from a normal distribution with mean 0 and standard deviation 1. The L2 norm satisfies a.norm() == nd.sqrt(nd.power(a, 2).sum())
    :return:
    """
    n = nd.normal(0, 1, shape=(2, 2))
    logger.info(n)

    a = nd.array([1, 2, 3, 4])
    logger.info(a.norm())
    logger.info(nd.sqrt(nd.power(a, 2).sum()))
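    # Both log the same value: ||a||_2 = sqrt(1 + 4 + 9 + 16) = sqrt(30) ≈ 5.477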
Example #19
def matrix_svd():
    """
    Singular value decomposition (SVD)
    :return:
    """
    isses = tf.InteractiveSession()
    A = tf.Variable(tf.random_normal(shape=(4, 4)))
    A.initializer.run()

    logger.info("A\n%s" % A.eval())
    logger.info("tf.svd(A)\n {0}".format(tf.svd(A)))
    isses.close()
Example #20
def get_similar_tokens(query_token, k, embed):
    """
    Find the nearest neighbors (synonyms) of a word in the embedding space
    :param query_token: the query word
    :param k: how many similar words to return
    :param embed: the pretrained word embedding
    :return:
    """
    topk, cos = knn(embed.idx_to_vec, embed.get_vecs_by_tokens([query_token]),
                    k + 1)
    for i, c in zip(topk[1:], cos[1:]):  # skip the input word itself
        logger.info('cosine sim=%.3f: %s' % (c, (embed.idx_to_token[i])))
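knn is assumed; one common definition (d2l-style) ranks every vocabulary vector by cosine similarity to the query vector:

def knn(W, x, k):
    # Cosine similarity between every row of W and the query x;
    # 1e-9 is added for numerical stability.
    cos = nd.dot(W, x.reshape((-1,))) / (
        (nd.sum(W * W, axis=1) + 1e-9).sqrt() * nd.sum(x * x).sqrt())
    topk = nd.topk(cos, k=k, ret_typ='indices').asnumpy().astype('int32')
    return topk, [cos[i].asscalar() for i in topk]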
Example #21
def matrix_inverse():
    """
    Compute the inverse of an invertible square matrix
    :return:
    """
    isses = tf.InteractiveSession()
    A = tf.Variable(tf.random_normal(shape=(4, 4)))
    A.initializer.run()

    logger.info("A\n%s" % A.eval())
    logger.info("tf.matrix_inverse(A)\n%s" % tf.matrix_inverse(A).eval())
    isses.close()
Example #22
def matrix_diag():
    """
    Build a diagonal matrix from a vector of diagonal values
    :return:
    """
    isses = tf.InteractiveSession()
    # Diagonal values
    a = tf.constant([1, 2, 3, 4])

    logger.info("a\n%s" % a.eval())
    logger.info("tf.diag(a)\n {0}".format(tf.diag(a).eval()))
    isses.close()
Example #23
def zeros_ones():
    c = tf.constant(0, dtype=tf.float32, shape=[3, 3])
    cc = tf.zeros(shape=[3, 3], dtype=tf.float32)
    cl = tf.zeros_like(c)
    zz = tf.ones(dtype=tf.float32, shape=[3, 3])
    ol = tf.ones_like(zz)
    with tf.Session() as sess:
        logger.info(sess.run(c))
        logger.info(sess.run(cc))
        logger.info(sess.run(cl))
        logger.info(sess.run(zz))
        logger.info(sess.run(ol))
Example #24
def get_analogy(tokens, embed):
    """
    Find the analogy word: given tokens [a, b, c], return d such that a : b :: c : d
    :param tokens: the three words of the analogy
    :param embed: the pretrained word embedding
    :return:
    """
    vecs = embed.get_vecs_by_tokens(tokens)
    x = vecs[1] - vecs[0] + vecs[2]
    topk, cos = knn(embed.idx_to_vec, x, 1)
    analogy = embed.idx_to_token[topk[0]]
    logger.info(analogy)
    return analogy
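For example, with a pretrained GloVe embedding loaded through mxnet.contrib.text (the file name below is illustrative), the classic analogy works:

# glove = text.embedding.create('glove', pretrained_file_name='glove.6B.50d.txt')
get_analogy(['man', 'woman', 'son'], glove)  # expected output: 'daughter'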
Example #25
def variable():
    """
    Variables must be initialized before use
    :return:
    """
    v = tf.Variable(tf.random_normal(shape=(3, 3)))

    # Initialize all variables
    initial_op = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(initial_op)
        logger.info(sess.run(v))
Example #26
def matrix_determinant():
    """
    Compute the determinant of a square matrix
    :return:
    """
    isses = tf.InteractiveSession()
    A = tf.Variable(tf.random_normal(shape=(4, 4)))
    A.initializer.run()

    logger.info("A\n%s" % A.eval())
    logger.info("tf.matrix_determinant(A)\n%s" %
                tf.matrix_determinant(A).eval())
    isses.close()
Example #27
def multiple_linear_regression():
    """
    Linear regression with multiple features
    :return:
    """
    boston_data = datasets.load_boston()
    X_train, Y_train = boston_data.data, boston_data.target
    n_samples = len(X_train)

    # Append the bias as an extra feature column (zeros here, so the bias is
    # effectively disabled; see the TODO below)
    X_train = np.c_[X_train, np.zeros(shape=(n_samples, 1))]
    n_features = X_train.shape[1]

    X_train = (X_train - np.mean(X_train)) / np.std(X_train)
    Y_train = np.reshape(Y_train, newshape=(n_samples, 1))
    # TODO: the bias is ignored for now; to model it, make the extra column all
    # ones so the bias is learned as a weight

    X = tf.placeholder(dtype=tf.float32,
                       shape=(n_samples, n_features),
                       name="X")
    Y = tf.placeholder(dtype=tf.float32, shape=(n_samples, 1), name="Y")

    w = tf.Variable(tf.random_normal(shape=(n_features, 1)), name="w")

    Y_that = tf.matmul(X, w)
    loss = tf.reduce_mean(tf.square(Y - Y_that), name="loss")
    optimizer = tf.train.GradientDescentOptimizer(
        0.001, name="optimizer").minimize(loss)

    linear_ops = tf.global_variables_initializer()
    total = []
    with tf.Session() as sess:
        sess.run(linear_ops)
        writer = tf.summary.FileWriter("multiplinear_regression", sess.graph)
        for i in range(1000):
            _, total_loss = sess.run([optimizer, loss],
                                     feed_dict={
                                         X: X_train,
                                         Y: Y_train
                                     })
            total.append(total_loss)
            logger.info("epoll {0} loss {1}".format(i, total_loss / n_samples))
        writer.close()
        w_value = sess.run(w)
        writer.close()

        show_loss(total)
        # Predict
        n = 500
        Y_pred = np.matmul(X_train[n, :], w_value)
        logger.info("pred {0} real {1}".format(Y_pred[0], Y_train[n][0]))
Example #28
def fashionmnist_dnn():
    """
    Image classification
    :return:
    """
    fashionmnist = input_data.read_data_sets(os.path.join(
        root_path, "data", "fashionMNIST"),
                                             one_hot=True)

    train_images, train_labels = fashionmnist.train.images, fashionmnist.train.labels
    test_images, test_labels = fashionmnist.test.images, fashionmnist.test.labels
    validation_images, validation_labels = fashionmnist.validation.images, fashionmnist.validation.labels

    train_labels = np.argmax(train_labels, 1)
    test_labels = np.argmax(test_labels, 1)
    # sparse_categorical_crossentropy needs integer labels for the validation set too
    validation_labels = np.argmax(validation_labels, 1)

    shapesize = int(np.math.sqrt(train_images.shape[1]))
    train_images = np.reshape(train_images,
                              newshape=(-1, shapesize, shapesize))
    test_images = np.reshape(test_images, newshape=(-1, shapesize, shapesize))
    validation_images = np.reshape(validation_images,
                                   newshape=(-1, shapesize, shapesize))

    model = keras.models.Sequential([
        keras.layers.Flatten(input_shape=(28, 28)),
        keras.layers.Dense(64),
        keras.layers.Activation("relu", name="relu"),
        keras.layers.Dense(10),
        keras.layers.Activation("softmax", name="softmax"),
    ])

    model.summary()

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(train_images,
              train_labels,
              epochs=10,
              validation_data=(validation_images, validation_labels))

    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    logger.info('Test accuracy: {0}'.format(test_acc))

    predictions = model.predict(test_images)
    predictions = np.argmax(predictions, 1)
    for i in range(10):
        logger.info("predict:{0} , label:{1}".format(predictions[i],
                                                     test_labels[i]))
Example #29
def k_linear_regression():
    """
    K-fold cross-validation
    :return:
    """
    from sklearn.model_selection import KFold
    X, Y, w, b = get_model()
    loss = get_loss(X, Y, w, b)
    optimizer = get_optimizer(loss)

    linear_ops = tf.global_variables_initializer()

    X_train, Y_train, n_samples = loaddata()
    total = []
    n_splits = 5

    with tf.Session() as sess:
        sess.run(linear_ops)
        writer = tf.summary.FileWriter("linear_regression", sess.graph)
        for i in range(10):
            total_train_loss = 0
            total_test_loss = 0

            k_fold = KFold(n_splits=n_splits, shuffle=False, random_state=None)
            for train_index, test_index in k_fold.split(X_train, Y_train):
                # Train on the training folds
                for x, y in zip(X_train[train_index], Y_train[train_index]):
                    _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
                    total_train_loss += l / len(train_index)

                # Evaluate the loss on the held-out fold
                w_val, b_val = sess.run([w, b])
                test_loss = get_loss(X_train[test_index], Y_train[test_index],
                                     w_val, b_val)
                total_test_loss += tf.reduce_mean(test_loss).eval()

            total.append(total_train_loss / n_splits)
            logger.info("epoll {0} train loss {1} test loss {2}".format(
                i, total_train_loss / n_splits, total_test_loss / n_splits))
        writer.close()
        w_val, b_val = sess.run([w, b])
        logger.info("w {0},b {1}".format(w_val, b_val))

        # Compare predictions with the ground truth
        show_data(X_train, Y_train, w_val, b_val)

        # Plot the loss curve
        show_loss(total)
Example #30
def matrix_condition():
    """
    Selection and indexing ops on a tensor: argmax, argmin, unique, where
    :return:
    """
    isses = tf.InteractiveSession()
    # Input values
    X = tf.constant([5., 1., 7., 2., 3., 4., 1., 3.], dtype=tf.float32)
    logger.info("X\n%s" % X.eval())
    logger.info("tf.argmax(X)\n {0}".format(tf.argmax(X).eval()))
    logger.info("tf.argmin(X)\n {0}".format(tf.argmin(X).eval()))
    logger.info("tf.unique(X)\n {0}".format(tf.unique(X)))
    logger.info(
        "tf.where(tf.equal(X,tf.constant(1,dtype=tf.float32)))\n {0}".format(
            tf.where(tf.equal(X, tf.constant(1, dtype=tf.float32))).eval()))
    isses.close()