Example #1
import tensorflow as tf

def optimization(infer, regularizer, rate_batch, learning_rate=0.001, reg=0.1, device="/cpu:0"):
    global_step = tf.train.get_global_step()
    assert global_step is not None
    with tf.device(device):
        cost_l2 = tf.nn.l2_loss(tf.subtract(infer, rate_batch))
        penalty = tf.constant(reg, dtype=tf.float32, shape=[], name="l2")
        cost = tf.add(cost_l2, tf.multiply(regularizer, penalty))
        train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost, global_step=global_step)
    return cost, train_op
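A minimal usage sketch; the placeholder shapes, the regularizer value, and the use of get_or_create_global_step are illustrative assumptions, not part of the original:

# Hypothetical inputs: predicted and observed ratings for one batch.
infer = tf.placeholder(tf.float32, shape=[None], name="infer")
rate_batch = tf.placeholder(tf.float32, shape=[None], name="rate_batch")
regularizer = tf.constant(0.05, dtype=tf.float32)  # e.g. an accumulated L2 penalty

# optimization() asserts that a global step already exists, so create one first.
tf.train.get_or_create_global_step()
cost, train_op = optimization(infer, regularizer, rate_batch)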
Example #2
import tensorflow as tf

a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)
add = tf.add(a, b)
mul = tf.multiply(a, b)
with tf.Session() as sess:
    # Run every operation with variable input
    print("Addition with variables: %i" % sess.run(add, feed_dict={
        a: 2,
        b: 3
    }))
    print("Multiplication with variables: %i" %
          sess.run(mul, feed_dict={
              a: 2,
              b: 3
          }))
# output:
# Addition with variables: 5
# Multiplication with variables: 6
matrix1 = tf.constant([[3., 3.]])
matrix2 = tf.constant([[2.], [2.]])
product = tf.matmul(matrix1, matrix2)
with tf.Session() as sess:
    result = sess.run(product)
    print(result)
    # result:
    # [[ 12.]]
Example #3
import math

import numpy as np
import tensorflow as tf

# Hyperparameters assumed here for completeness (illustrative values taken
# from the standard word2vec tutorial; the original snippet does not set them).
batch_size = 128
embedding_size = 128  # Dimension of the embedding vectors.
vocabulary_size = 50000

# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16  # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64  # Number of negative examples to sample.

graph = tf.Graph()

with graph.as_default():

    # Input data.
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # Ops and variables pinned to the CPU because of missing GPU implementation
    with tf.device('/cpu:0'):
        # Look up embeddings for inputs.
        embeddings = tf.Variable(
            tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        embed = tf.nn.embedding_lookup(embeddings, train_inputs)

        # Construct the variables for the NCE loss
        nce_weights = tf.Variable(
            tf.truncated_normal([vocabulary_size, embedding_size],
                                stddev=1.0 / math.sqrt(embedding_size)))
        nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

    # Compute the average NCE loss for the batch.
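    # (Completion sketch, following the standard word2vec tutorial this snippet
    # is based on: tf.nn.nce_loss draws num_sampled negative samples each time
    # the loss is evaluated, and the per-example losses are averaged.)
    loss = tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_weights,
                       biases=nce_biases,
                       labels=train_labels,
                       inputs=embed,
                       num_sampled=num_sampled,
                       num_classes=vocabulary_size))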
Example #4
import tensorflow as tf

a = tf.constant(1.0, name="a")

sess = tf.Session()
print(sess.run(a))
sess.close()  # sessions hold resources, so close them explicitly...

with tf.Session() as sess:  # ...or let a context manager close them for you
    print(sess.run(a))
Example #5
import tensorflow as tf

def _conv_layer(input, weights, bias):
    # 2-D convolution with constant (non-trainable) weights, stride 1 and
    # SAME padding, followed by a bias add.
    conv = tf.nn.conv2d(input,
                        tf.constant(weights),
                        strides=(1, 1, 1, 1),
                        padding='SAME')
    return tf.nn.bias_add(conv, bias)
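A quick usage sketch; the tensor shapes and values below are illustrative assumptions, not from the original:

import numpy as np

image = tf.placeholder(tf.float32, shape=[1, 32, 32, 3])   # NHWC input batch
weights = np.random.randn(3, 3, 3, 16).astype(np.float32)  # HWIO filter
bias = np.zeros(16, dtype=np.float32)
out = _conv_layer(image, weights, bias)  # SAME padding keeps the 32x32 spatial size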
Example #6
import tensorflow as tf

a = tf.constant('hello, TensorFlow!')

with tf.Session() as sess:
    print(sess.run(a))
    
writer = tf.summary.FileWriter("/log", tf.get_default_graph())
writer.close()
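The graph written above can then be inspected with TensorBoard, e.g. tensorboard --logdir /log. Note that an absolute path like /log must be writable; a relative directory such as ./log is the more common choice.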
Example #7
import tensorflow as tf

hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))  # b'Hello, TensorFlow!' (string tensors come back as bytes under Python 3)
Example #8
import tensorflow as tf

node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0) # also tf.float32 implicitly
print(node1, node2) # Tensor("Const:0", shape=(), dtype=float32) Tensor("Const_1:0", shape=(), dtype=float32)

sess = tf.Session()
print(sess.run([node1,node2])) #[3.0, 4.0]

node3 = tf.add(node1,node2)
print(node3) # Tensor("Add:0", shape=(), dtype=float32)
print(sess.run(node3)) # 7.0

a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a+b
print(sess.run(adder_node,{a:3,b:4})) #7.0
print(sess.run(adder_node,{a:[1,2],b:[3,4]})) #[ 4.  6.]

add_and_triple = a*3  # note: despite the name, this only triples a; no addition is involved
print(sess.run(add_and_triple,{a:5})) #15.0
print(sess.run(add_and_triple,{a:[1,2]})) #[ 3.  6.]

W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)  # rebinds the Python name b (previously a placeholder)
x = tf.placeholder(tf.float32)
linear_model = W*x + b
init = tf.global_variables_initializer()
sess.run(init)  # variables are only actually initialized when init is run
print(sess.run(linear_model, {x:[1,2,3,4]}))  # [ 0.          0.30000001  0.60000002  0.90000004]
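A natural continuation in the same getting-started style (a sketch; the training targets fed to y are the tutorial's illustrative choice):

y = tf.placeholder(tf.float32)
loss = tf.reduce_sum(tf.square(linear_model - y))  # sum of squared errors
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
for i in range(1000):
    sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
print(sess.run([W, b]))  # converges toward W ≈ [-1.], b ≈ [1.]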
Example #9
import tensorflow as tf

a = tf.constant(1.0, name="a")
b = tf.constant([1.0, 2.0], name="b")
c = tf.constant([[1.0, 2.0], [1.0, 2.0]], name="c")

print(a)
print(b)
print(c)

sess = tf.Session()
print(sess.run(a))
print(sess.run(b))
print(sess.run(c))

writer = tf.summary.FileWriter("/log", tf.get_default_graph())
writer.close()
Example #10
import tensorflow as tf

with tf.name_scope("input1"):
    input1 = tf.constant([1.0, 2.0, 3.0], name="input1")

with tf.name_scope("input2"):
    input2 = tf.Variable(tf.random_uniform([3]), name="input2")

output = tf.add_n([input1, input2], name="add")

writer = tf.summary.FileWriter("/log", tf.get_default_graph())
writer.close()
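With the name scopes above, TensorBoard renders input1 and input2 as single collapsible nodes, which keeps larger graphs readable.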
Example #11
import tensorflow as tf

w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))

x = tf.constant([[0.7, 0.9]])

a = tf.matmul(x, w1)
y = tf.matmul(a, w2)

with tf.Session() as sess:
    # sess.run(w1.initializer)
    # sess.run(w2.initializer)
    sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated in favor of this
    print(sess.run(y))
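Because both random_normal calls fix seed=1, the printed result is reproducible across runs of the same TensorFlow version.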
Example #12
import tensorflow as tf

a = tf.constant(2)
b = tf.constant(3)
with tf.Session() as sess:
    print("a=2, b=3")
    print("Addition with constants: %i" % sess.run(a+b))
    print("Multiplication with constants: %i" % sess.run(a*b))
    print(tf.get_default_graph())
writer = tf.summary.FileWriter("/log", tf.get_default_graph())
writer.close()