def test_lookup_positives(self):
    scores = constant([[1., 0., 2.], [0., 1., 1.]])
    clicks = constant([[2], [1]])
    expected = [2., 1.]
    positive_scores = lookup_positives(scores, clicks)
    with self.test_session():
        self.assertListEqual(positive_scores.eval().tolist(), expected)
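The test above pins down the contract of `lookup_positives`: for each batch row, pick the score at the clicked item's index. A minimal sketch of a function that satisfies this test, assuming the project does not define it differently:

```python
import tensorflow as tf

def lookup_positives(scores, clicks):
    """Gather, per batch row, the score at the clicked item's index."""
    batch_indices = tf.range(tf.shape(scores)[0])               # [0, 1, ...]
    click_indices = tf.squeeze(clicks, axis=1)                  # [2, 1] for the test data
    gather_indices = tf.stack([batch_indices, click_indices], axis=1)
    return tf.gather_nd(scores, gather_indices)                 # shape [batch_size]
```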
def test_predict_scores(self):
    features = {
        'anchor_label': constant([[0., 1.], [1., 0.]]),
        'label': constant([[[0., 1.], [0., 1.], [0., 1.]],
                           [[1., 0.], [1., 0.], [1., 0.]]])
    }
    p = predict_scores(features)
    with self.test_session() as sess:
        sess.run(global_variables_initializer())
        print(p.eval())
def test_create_diffs(self):
    scores = constant([[1., 0., 2.], [0., 1., 1.]])
    positive_scores = [2., 1.]
    expected = [[1., 2., 0.], [1., 0., 0.]]
    diffs = create_diffs(positive_scores, scores)
    with self.test_session():
        self.assertListEqual(diffs.eval().tolist(), expected)
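The expected values reveal the shape of `create_diffs`: each row's positive score minus every score in that row (e.g. 2 − [1, 0, 2] = [1, 2, 0]). A broadcast-based sketch consistent with the test, offered as an assumption about the real implementation:

```python
import tensorflow as tf

def create_diffs(positive_scores, scores):
    """Broadcast-subtract each row's scores from that row's positive score."""
    positives = tf.expand_dims(tf.convert_to_tensor(positive_scores), axis=1)  # [batch, 1]
    return positives - scores                                                  # [batch, n_items]
```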
def _calculate_supervised_similarities(y_true) -> Tensor:
    """
    Calculates the target supervised similarities.

    Performs a tensorflow nested loop, in order to compare the values
    of y_true for range(batch_size).

    :param y_true: the y_true value.
    :return: Tensor containing the target supervised similarities.
    """
    # Get the batch size.
    batch_size = shape(y_true)[0]
    # Initialize outer loop index.
    i = constant(0)
    # Initialize symmetric supervised similarity matrix targets.
    target_similarity = zeros((batch_size, batch_size))

    def outer_loop_condition(_i, _batch_size, _y_true, _target_similarity):
        """Define outer loop condition."""
        return less(_i, _batch_size)

    def outer_loop_body(_i, _batch_size, _y_true, _target_similarity):
        """Define outer loop body."""
        # Initialize inner loop index.
        j = constant(0)

        def inner_loop_condition(_i, _j, _y_true, _target_similarity):
            """Define inner loop condition."""
            return less(_j, _batch_size)

        def inner_loop_body(_i, _j, _y_true, _target_similarity):
            """Define inner loop body."""
            # Graph tensors support neither a Python `if` nor item
            # assignment; use cond with tensor_scatter_nd_update instead.
            _target_similarity = cond(
                reduce_all(equal(_y_true[_i], _y_true[_j])),
                lambda: tensor_scatter_nd_update(
                    _target_similarity, [[_i, _j]], [1.]),
                lambda: _target_similarity)
            return _i, _j + 1, _y_true, _target_similarity

        # Begin inner while loop.
        _, j, _, _target_similarity = while_loop(
            inner_loop_condition, inner_loop_body,
            [_i, j, _y_true, _target_similarity])
        return _i + 1, _batch_size, _y_true, _target_similarity

    # Begin outer while loop.
    i, _, _, target_similarity = while_loop(
        outer_loop_condition, outer_loop_body,
        [i, batch_size, y_true, target_similarity])

    return target_similarity
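The nested while loop can also be replaced by a single broadcast comparison, which is shorter and avoids per-element graph ops; a vectorized sketch (not the author's implementation):

```python
import tensorflow as tf

def calculate_supervised_similarities_vectorized(y_true):
    """Pairwise label-equality matrix via broadcasting instead of nested loops."""
    y = tf.reshape(y_true, [tf.shape(y_true)[0], -1])        # flatten per-example labels
    equal_pairs = tf.reduce_all(
        tf.equal(tf.expand_dims(y, 1), tf.expand_dims(y, 0)), axis=-1)
    return tf.cast(equal_pairs, tf.float32)                  # [batch, batch] of 0/1
```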
def run():
    # Create a variable, initialized to the scalar 0.
    state = Variable(0, name="counter")

    # Create an op whose effect is to increment `state` by 1.
    one = constant(1)
    new_value = add(state, one)
    update = assign(state, new_value)

    # After the graph is launched, variables must first be initialized
    # by an `init` op, so that op has to be added to the graph.
    init_op = initialize_all_variables()

    # Launch the graph and run the ops.
    with Session() as sess:
        # Run the 'init' op.
        sess.run(init_op)
        # Print the initial value of 'state'.
        print(sess.run(state))
        # Run the op that updates 'state', and print 'state'.
        for _ in range(3):
            sess.run(update)
            print(sess.run(state))
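For comparison, the same counter in TensorFlow 2's eager mode needs no session or explicit init op; a sketch assuming TF 2.x:

```python
import tensorflow as tf

def run_eager():
    # In eager mode the variable is initialized on creation.
    state = tf.Variable(0, name="counter")
    print(state.numpy())
    # assign_add mutates the variable in place.
    for _ in range(3):
        state.assign_add(1)
        print(state.numpy())
```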
from BBDATA import *
import tensorflow as tf
from cnn_utils import save_model
import matplotlib.pyplot as plt

train_times = 50000
base_path = "/Users/coorchice/Desktop/ML/model/ml/BreadBasket/"
save_path = base_path + str(train_times) + "/"

BBDATA = read_datas('data/')

x_data = tf.placeholder(tf.float32, [None, 135])
# Shape [None, 1] so that (y - y_data) does not broadcast to [None, None].
y_data = tf.placeholder(tf.float32, [None, 1])

W = tf.Variable(tf.truncated_normal([135, 1], stddev=0.1))
b = tf.Variable(tf.constant(0.1, shape=[1]))
y = tf.nn.relu(tf.matmul(x_data, W) + b)

# Compute the loss; the cross-entropy version is kept commented out,
# and mean squared error is used instead.
with tf.name_scope('loss'):
    # cross_entropy = -tf.reduce_sum(y_data * tf.log(y))
    cross_entropy = tf.reduce_mean(tf.square(y - y_data))
    tf.summary.scalar('loss', cross_entropy)

# init_lr = 0.00001
lr = tf.Variable(0.00005, trainable=False)
# global_step = tf.Variable(0., trainable=False)
# lr = tf.train.exponential_decay(init_lr, global_step=global_step, decay_steps=10000, decay_rate=0.5, staircase=True)

# Use gradient descent to keep adjusting the variables and minimize the loss,
# here with the learning rate `lr` defined above.
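The snippet breaks off before the training op is defined; a plausible continuation under the same TF1 API (an assumption, not the original file's code, and `next_batch` is a hypothetical helper on the loaded dataset):

```python
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(lr).minimize(cross_entropy)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(train_times):
        # Hypothetical batching helper; the real BBDATA interface may differ.
        batch_x, batch_y = BBDATA.train.next_batch(100)
        sess.run(train_step, feed_dict={x_data: batch_x, y_data: batch_y})
```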
def bias_variable(shape):
    # Small positive initial bias, a common choice alongside ReLU units.
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
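This helper is conventionally paired with a weight initializer; a sketch of the usual counterpart (assumed, not shown in the source):

```python
def weight_variable(shape):
    # Truncated-normal initialization breaks symmetry between units.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

# Usage: a fully connected layer of 10 units over 135 inputs.
# W = weight_variable([135, 10])
# b = bias_variable([10])
```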
# -*- coding:utf-8 -*-
'''
@Author: zzx
@E-mail: [email protected]
@File: 对多元函数求导.py
@CreateTime: 2020/7/21 15:13
'''
import tensorflow as tf

# 2. Differentiating a multivariate function
X = tf.constant([[1., 2.], [3., 4.]])
y = tf.constant([[1.], [2.]])

# Function parameters; the initial values can be chosen arbitrarily.
w = tf.Variable(initial_value=[[1.], [2.]])
b = tf.Variable(initial_value=1.)

# Automatic differentiation is recorded inside this context.
with tf.GradientTape() as tape:
    L = 0.5 * tf.reduce_sum(tf.square(tf.matmul(X, w) + b - y))
w_grad, b_grad = tape.gradient(L, [w, b])
print("L: {}, w_grad: {}, b_grad: {}".format(L.numpy(), w_grad.numpy(), b_grad.numpy()))
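A quick hand check of what the tape should return: with residual r = Xw + b − y = [[5], [10]], the gradients of L = ½‖Xw + b − y‖² are ∂L/∂w = Xᵀr = [[35], [50]] and ∂L/∂b = Σr = 15. A numpy verification sketch:

```python
import numpy as np

X_np = np.array([[1., 2.], [3., 4.]])
y_np = np.array([[1.], [2.]])
w_np = np.array([[1.], [2.]])
b_np = 1.

r = X_np @ w_np + b_np - y_np      # residual: [[5.], [10.]]
print(X_np.T @ r)                  # dL/dw -> [[35.], [50.]]
print(r.sum())                     # dL/db -> 15.0
```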
def test_constant():
    t = tf.ones((16, 10))
    # tf.zeros already returns a tensor; wrapping it in tf.constant is
    # redundant (and fails in graph mode, where tf.constant expects values).
    b = tf.zeros((t.shape[0], t.shape[1]), name='b')  # b: [in_caps, out_caps]