num_skips = 2  # How many times to reuse an input to generate a label.

# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16      # Random set of words to evaluate similarity on.
valid_window = 100   # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64     # Number of negative examples to sample.

graph = tf.Graph()

with graph.as_default():
    # Input data.
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # Ops and variables pinned to the CPU because of missing GPU implementation.
    with tf.device('/cpu:0'):
        # Look up embeddings for inputs.
        embeddings = tf.Variable(
            tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        embed = tf.nn.embedding_lookup(embeddings, train_inputs)

        # Construct the variables for the NCE loss.
        nce_weights = tf.Variable(
            tf.truncated_normal([vocabulary_size, embedding_size],
                                stddev=1.0 / math.sqrt(embedding_size)))
        nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
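    # Continuation sketch (assumed, following the TensorFlow word2vec tutorial
    # this snippet comes from; not part of the snippet above): average the NCE
    # loss over the batch and train the embeddings with plain SGD.
    loss = tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_weights,
                       biases=nce_biases,
                       labels=train_labels,
                       inputs=embed,
                       num_sampled=num_sampled,
                       num_classes=vocabulary_size))
    optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)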
import tensorflow as tf

a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)
add = tf.add(a, b)
mul = tf.multiply(a, b)

with tf.Session() as sess:
    # Run every operation with variable input
    print("Addition with variables: %i" % sess.run(add, feed_dict={a: 2, b: 3}))
    print("Multiplication with variables: %i" % sess.run(mul, feed_dict={a: 2, b: 3}))
    # output:
    # Addition with variables: 5
    # Multiplication with variables: 6

# A 1x2 matrix times a 2x1 matrix yields a 1x1 matrix.
matrix1 = tf.constant([[3., 3.]])
matrix2 = tf.constant([[2.], [2.]])
product = tf.matmul(matrix1, matrix2)

with tf.Session() as sess:
    result = sess.run(product)
    print(result)
    # result:
    # [[ 12.]]
import tensorflow as tf

w1 = tf.Variable(tf.random_normal([2, 3], stddev=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1))

x = tf.placeholder(tf.float32, shape=[3, 2], name="input")
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(y, feed_dict={x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))

    cross_entropy = -tf.reduce_mean(y * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
    learning_rate = 0.001
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
    print(sess.run(tf.trainable_variables()))
import numpy
import tensorflow as tf

rng = numpy.random

# Parameters
learning_rate = 0.01
training_epochs = 2000
display_step = 50

# Training Data
train_X = numpy.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59,
                         2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_Y = numpy.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596,
                         2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
n_samples = train_X.shape[0]

# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")

# Create Model

# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")

# Construct a linear model
activation = tf.add(tf.multiply(X, W), b)

# Minimize the squared errors
cost = tf.reduce_sum(tf.pow(activation - Y, 2)) / (2 * n_samples)  # L2 loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)  # Gradient descent
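# Training-loop sketch (assumed, not part of the original snippet): iterate
# gradient descent over the training pairs and report the cost periodically.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        if (epoch + 1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch: %04d cost= %.9f" % (epoch + 1, c))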
import tensorflow as tf

node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0)  # also tf.float32 implicitly
print(node1, node2)
# Tensor("Const:0", shape=(), dtype=float32) Tensor("Const_1:0", shape=(), dtype=float32)

sess = tf.Session()
print(sess.run([node1, node2]))  # [3.0, 4.0]

node3 = tf.add(node1, node2)
print(node3)            # Tensor("Add:0", shape=(), dtype=float32)
print(sess.run(node3))  # 7.0

a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b
print(sess.run(adder_node, {a: 3, b: 4}))            # 7.0
print(sess.run(adder_node, {a: [1, 2], b: [3, 4]}))  # [ 4.  6.]

add_and_triple = a * 3  # note: despite the name, this just triples `a`
print(sess.run(add_and_triple, {a: 5}))       # 15.0
print(sess.run(add_and_triple, {a: [1, 2]}))  # [ 3.  6.]

W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W * x + b

init = tf.global_variables_initializer()
sess.run(init)  # Variables are only initialized when this op runs.
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))  # approximately [0., 0.3, 0.6, 0.9]
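# Continuation sketch (assumed, mirroring the TensorFlow getting-started guide
# this snippet follows): score the model with a squared-error loss and fit
# W and b by gradient descent.
y = tf.placeholder(tf.float32)
loss = tf.reduce_sum(tf.square(linear_model - y))
train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
sess.run(init)  # reset W and b to their initial (poor) values
for _ in range(1000):
    sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
print(sess.run([W, b]))  # converges toward W ~ -1.0, b ~ 1.0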
def stylize(network, initial, initial_noiseblend, content, styles,
            preserve_colors, iterations, content_weight, content_weight_blend,
            style_weight, style_layer_weight_exp, style_blend_weights,
            tv_weight, learning_rate, beta1, beta2, epsilon, pooling,
            print_iterations=None, checkpoint_iterations=None):
    """
    Stylize images.

    This function yields tuples (iteration, image); `iteration` is None
    if this is the final image (the last iteration). Other tuples are yielded
    every `checkpoint_iterations` iterations.

    :rtype: iterator[tuple[int|None,image]]
    """
    shape = (1,) + content.shape
    style_shapes = [(1,) + style.shape for style in styles]
    content_features = {}
    style_features = [{} for _ in styles]

    vgg_weights, vgg_mean_pixel = vgg.load_net(network)

    layer_weight = 1.0
    style_layers_weights = {}
    for style_layer in STYLE_LAYERS:
        style_layers_weights[style_layer] = layer_weight
        layer_weight *= style_layer_weight_exp

    # normalize style layer weights
    layer_weights_sum = 0
    for style_layer in STYLE_LAYERS:
        layer_weights_sum += style_layers_weights[style_layer]
    for style_layer in STYLE_LAYERS:
        style_layers_weights[style_layer] /= layer_weights_sum

    # compute content features in feedforward mode
    g = tf.Graph()
    with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:
        image = tf.placeholder('float', shape=shape)
        net = vgg.net_preloaded(vgg_weights, image, pooling)
        content_pre = np.array([vgg.preprocess(content, vgg_mean_pixel)])
        for layer in CONTENT_LAYERS:
            content_features[layer] = net[layer].eval(feed_dict={image: content_pre})

    # compute style features in feedforward mode
    for i in range(len(styles)):
        g = tf.Graph()
        with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:
            image = tf.placeholder('float', shape=style_shapes[i])
            net = vgg.net_preloaded(vgg_weights, image, pooling)
            style_pre = np.array([vgg.preprocess(styles[i], vgg_mean_pixel)])
            for layer in STYLE_LAYERS:
                features = net[layer].eval(feed_dict={image: style_pre})
                features = np.reshape(features, (-1, features.shape[3]))
                gram = np.matmul(features.T, features) / features.size
                style_features[i][layer] = gram

    initial_content_noise_coeff = 1.0 - initial_noiseblend

    # make stylized image using backpropagation
    with tf.Graph().as_default():
        if initial is None:
            noise = np.random.normal(size=shape, scale=np.std(content) * 0.1)
            initial = tf.random_normal(shape) * 0.256
        else:
            initial = np.array([vgg.preprocess(initial, vgg_mean_pixel)])
            initial = initial.astype('float32')
            noise = np.random.normal(size=shape, scale=np.std(content) * 0.1)
            initial = (initial * initial_content_noise_coeff +
                       (tf.random_normal(shape) * 0.256) * (1.0 - initial_content_noise_coeff))
        image = tf.Variable(initial)
        net = vgg.net_preloaded(vgg_weights, image, pooling)

        # content loss
        content_layers_weights = {}
        content_layers_weights['relu4_2'] = content_weight_blend
        content_layers_weights['relu5_2'] = 1.0 - content_weight_blend

        content_loss = 0
        content_losses = []
        for content_layer in CONTENT_LAYERS:
            content_losses.append(
                content_layers_weights[content_layer] * content_weight *
                (2 * tf.nn.l2_loss(net[content_layer] - content_features[content_layer]) /
                 content_features[content_layer].size))
        content_loss += reduce(tf.add, content_losses)

        # style loss
        style_loss = 0
        for i in range(len(styles)):
            style_losses = []
            for style_layer in STYLE_LAYERS:
                layer = net[style_layer]
                _, height, width, number = map(lambda i: i.value, layer.get_shape())
                size = height * width * number
                feats = tf.reshape(layer, (-1, number))
                gram = tf.matmul(tf.transpose(feats), feats) / size
                style_gram = style_features[i][style_layer]
                style_losses.append(style_layers_weights[style_layer] * 2 *
                                    tf.nn.l2_loss(gram - style_gram) / style_gram.size)
            style_loss += style_weight * style_blend_weights[i] * reduce(tf.add, style_losses)

        # total variation denoising
        tv_y_size = _tensor_size(image[:, 1:, :, :])
        tv_x_size = _tensor_size(image[:, :, 1:, :])
        tv_loss = tv_weight * 2 * (
            (tf.nn.l2_loss(image[:, 1:, :, :] - image[:, :shape[1] - 1, :, :]) / tv_y_size) +
            (tf.nn.l2_loss(image[:, :, 1:, :] - image[:, :, :shape[2] - 1, :]) / tv_x_size))

        # overall loss
        loss = content_loss + style_loss + tv_loss

        # optimizer setup
        train_step = tf.train.AdamOptimizer(learning_rate, beta1, beta2, epsilon).minimize(loss)

        def print_progress():
            stderr.write('  content loss: %g\n' % content_loss.eval())
            stderr.write('    style loss: %g\n' % style_loss.eval())
            stderr.write('       tv loss: %g\n' % tv_loss.eval())
            stderr.write('    total loss: %g\n' % loss.eval())

        # optimization
        best_loss = float('inf')
        best = None
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            stderr.write('Optimization started...\n')
            if print_iterations and print_iterations != 0:
                print_progress()
            for i in range(iterations):
                stderr.write('Iteration %4d/%4d\n' % (i + 1, iterations))
                train_step.run()

                last_step = (i == iterations - 1)
                if last_step or (print_iterations and i % print_iterations == 0):
                    print_progress()

                if (checkpoint_iterations and i % checkpoint_iterations == 0) or last_step:
                    this_loss = loss.eval()
                    if this_loss < best_loss:
                        best_loss = this_loss
                        best = image.eval()

                    img_out = vgg.unprocess(best.reshape(shape[1:]), vgg_mean_pixel)

                    if preserve_colors:
                        original_image = np.clip(content, 0, 255)
                        styled_image = np.clip(img_out, 0, 255)

                        # Luminosity transfer steps:
                        # 1. Convert stylized RGB -> grayscale according to Rec.601 luma (0.299, 0.587, 0.114)
                        # 2. Convert stylized grayscale into YUV (YCbCr)
                        # 3. Convert original image into YUV (YCbCr)
                        # 4. Recombine (stylizedYUV.Y, originalYUV.U, originalYUV.V)
                        # 5. Convert recombined image from YUV back to RGB

                        # 1
                        styled_grayscale = rgb2gray(styled_image)
                        styled_grayscale_rgb = gray2rgb(styled_grayscale)
                        # 2
                        styled_grayscale_yuv = np.array(
                            Image.fromarray(styled_grayscale_rgb.astype(np.uint8)).convert('YCbCr'))
                        # 3
                        original_yuv = np.array(
                            Image.fromarray(original_image.astype(np.uint8)).convert('YCbCr'))
                        # 4
                        w, h, _ = original_image.shape
                        combined_yuv = np.empty((w, h, 3), dtype=np.uint8)
                        combined_yuv[..., 0] = styled_grayscale_yuv[..., 0]
                        combined_yuv[..., 1] = original_yuv[..., 1]
                        combined_yuv[..., 2] = original_yuv[..., 2]
                        # 5
                        img_out = np.array(Image.fromarray(combined_yuv, 'YCbCr').convert('RGB'))

                    yield ((None if last_step else i), img_out)
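# Hypothetical driver sketch (every value below is assumed for illustration,
# not taken from the original code): stylize() is a generator, so iterate it
# and handle each checkpointed image as it is yielded.
#
# for step, img in stylize(network='imagenet-vgg-verydeep-19.mat',
#                          initial=None, initial_noiseblend=0.0,
#                          content=content_img, styles=[style_img],
#                          preserve_colors=False, iterations=1000,
#                          content_weight=5.0, content_weight_blend=1.0,
#                          style_weight=500.0, style_layer_weight_exp=1.0,
#                          style_blend_weights=[1.0], tv_weight=100.0,
#                          learning_rate=10.0, beta1=0.9, beta2=0.999,
#                          epsilon=1e-8, pooling='max',
#                          checkpoint_iterations=100):
#     handle_checkpoint(step, img)  # hypothetical callback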
import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1

# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784])  # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10])   # 0-9 digits recognition => 10 classes

# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Construct model
pred = tf.nn.softmax(tf.matmul(x, W) + b)  # Softmax

# Minimize error using cross entropy
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()
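# Training-loop sketch (assumed, not part of the original snippet): minimize
# the cross-entropy over mini-batches and report the average cost per epoch.
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        for _ in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            avg_cost += c / total_batch
        if (epoch + 1) % display_step == 0:
            print("Epoch: %04d cost= %.9f" % (epoch + 1, avg_cost))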
from numpy.random import RandomState
import tensorflow as tf

batch_size = 8

w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))

x = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')

a = tf.matmul(x, w1)
y = tf.matmul(a, w2)

cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)

# Generate a synthetic dataset: the label is 1 when x1 + x2 < 1.
rdm = RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size, 2)
Y = [[int(x1 + x2 < 1)] for (x1, x2) in X]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("before training...")
    print(sess.run(w1))
    print(sess.run(w2))

    STEPS = 5000
    for i in range(STEPS):
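        # Loop body assumed (the snippet is truncated here); this is the
        # standard mini-batch update for this example.
        start = (i * batch_size) % dataset_size
        end = min(start + batch_size, dataset_size)
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})
            print("After %d training step(s), cross entropy on all data is %g" %
                  (i, total_cross_entropy))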
# coding:utf-8
'''
Softmax regression with a single linear layer for handwritten digit recognition.
'''
import input_data  # local copy of the old TensorFlow MNIST tutorial helper
import tensorflow as tf

# MNIST data input
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

x = tf.placeholder("float", [None, 784])  # placeholder; None means the first dimension of this tensor can have any length

# w has shape [784, 10], initialized to zeros
w = tf.Variable(tf.zeros([784, 10]))
# b has shape [10], initialized to zeros
b = tf.Variable(tf.zeros([10]))

y = tf.nn.softmax(tf.matmul(x, w) + b)

# loss: take the log of each element of y with tf.log, multiply each element
# of y_ by the corresponding element of tf.log(y), then sum all elements of
# the tensor with tf.reduce_sum.
y_ = tf.placeholder("float", [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))

# Gradient descent
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

# Initialization
init = tf.global_variables_initializer()

# Session
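# The session block announced by the comment above is missing from the
# snippet; this is a sketch of the standard training/evaluation loop for this
# tutorial (assumed).
with tf.Session() as sess:
    sess.run(init)
    for _ in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Fraction of test images whose predicted digit matches the label.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))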
from __future__ import print_function

import numpy as np
import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# In this example, we limit mnist data
Xtr, Ytr = mnist.train.next_batch(5000)  # 5000 for training (nn candidates)
Xte, Yte = mnist.test.next_batch(200)    # 200 for testing

# tf Graph Input
xtr = tf.placeholder("float", [None, 784])
xte = tf.placeholder("float", [784])

# Nearest Neighbor calculation using L1 Distance
# Calculate L1 Distance
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)
# Prediction: Get min distance index (Nearest neighbor)
pred = tf.arg_min(distance, 0)

accuracy = 0.

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
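# Evaluation-loop sketch (assumed; the snippet stops at the comment above):
# classify each test image with the label of its nearest training image.
with tf.Session() as sess:
    sess.run(init)
    for i in range(len(Xte)):
        # Get the nearest neighbor index among the training candidates
        nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
        # Count the prediction as correct when the neighbor's label matches
        if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
            accuracy += 1. / len(Xte)
    print("Done! Accuracy:", accuracy)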
    # dlayer_1 = tf.nn.dropout(layer_1, 0.5)
    # layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    # layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation
    # out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    # return out_layer
    return layer_1


embedding = {
    'input': tf.Variable(tf.random_uniform([len(sku_dict) + 1, emb_size], -1.0, 1.0))
    # 'output': tf.Variable(tf.random_uniform([len(label_dict) + 1, emb_size], -1.0, 1.0))
}

emb_mask = tf.placeholder(tf.float32, shape=[None, max_window_size, 1])
word_num = tf.placeholder(tf.float32, shape=[None, 1])
x_batch = tf.placeholder(tf.int32, shape=[None, max_window_size])
y_batch = tf.placeholder(tf.int64, [None, 1])

# Average the window's word embeddings: mask out padding positions, sum over
# the window, and divide by the number of real words.
input_embedding = tf.nn.embedding_lookup(embedding['input'], x_batch)
project_embedding = tf.div(
    tf.reduce_sum(tf.multiply(input_embedding, emb_mask), 1), word_num)

# Construct model
pred = multilayer_perceptron(project_embedding, weights, biases)

# Construct the variables for the NCE loss
nce_weights = tf.Variable(
    tf.truncated_normal([n_classes, n_hidden_1],
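                        # The statement is truncated in the source; the stddev
                        # below is assumed, mirroring the word2vec NCE
                        # initialization earlier in this collection.
                        stddev=1.0 / math.sqrt(n_hidden_1)))
nce_biases = tf.Variable(tf.zeros([n_classes]))  # assumed companion bias term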