def inference(images, hidden1_units, hidden2_units):
    # First hidden layer
    with tf.compat.v1.name_scope('hidden1'):
        weights = tf.Variable(
            tf.random.truncated_normal([IMAGE_PIXELS, hidden1_units],
                                       stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden1_units]), name='biases')
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
    # Second hidden layer
    with tf.compat.v1.name_scope('hidden2'):
        weights = tf.Variable(
            tf.random.truncated_normal([hidden1_units, hidden2_units],
                                       stddev=1.0 / math.sqrt(float(hidden1_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden2_units]), name='biases')
        hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
    # Linear layer producing the softmax logits
    with tf.compat.v1.name_scope('softmax_linear'):
        weights = tf.Variable(
            tf.random.truncated_normal([hidden2_units, NUM_CLASSES],
                                       stddev=1.0 / math.sqrt(float(hidden2_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]), name='biases')
        logits = tf.matmul(hidden2, weights) + biases
    return logits
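# A minimal usage sketch (assumed, not from the original file): build the graph in
# TF 1.x compat mode with a placeholder for the flattened images. The hidden-layer
# sizes are illustrative; IMAGE_PIXELS and NUM_CLASSES are the constants used above.
images_placeholder = tf.compat.v1.placeholder(tf.float32, shape=[None, IMAGE_PIXELS])
logits = inference(images_placeholder, hidden1_units=128, hidden2_units=32)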
def forward(self, examples, labels):
    """Build the forward pass of the graph."""
    opts = self._options

    # Declare all the variables we need.
    # Embedding: [vocab_size, emb_dim]
    init_width = 0.5 / opts.emb_dim
    emb = tf.Variable(
        tf.random_uniform([opts.vocab_size, opts.emb_dim],
                          -init_width, init_width),
        name="emb")
    self._emb = emb

    # Softmax weights: [vocab_size, emb_dim]
    sm_w_t = tf.Variable(
        tf.zeros([opts.vocab_size, opts.emb_dim]), name="sm_w_t")

    # Softmax bias: [vocab_size]
    sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")

    # Global step: scalar
    self.global_step = tf.Variable(0, name="global_step")

    # Nodes to compute the NCE loss with candidate sampling.
    labels_matrix = tf.reshape(
        tf.cast(labels, dtype=tf.int64), [opts.batch_size, 1])

    # Negative sampling.
    sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
        true_classes=labels_matrix,
        num_true=1,
        num_sampled=opts.num_samples,
        unique=True,
        range_max=opts.vocab_size,
        distortion=0.75,
        unigrams=opts.vocab_counts.tolist()))

    # Embeddings for the examples: [batch_size, emb_dim]
    example_emb = tf.nn.embedding_lookup(emb, examples)

    # Weights for the labels: [batch_size, emb_dim]
    true_w = tf.nn.embedding_lookup(sm_w_t, labels)
    # Biases for the labels: [batch_size, 1]
    true_b = tf.nn.embedding_lookup(sm_b, labels)

    # Weights for sampled ids: [num_sampled, emb_dim]
    sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
    # Biases for sampled ids: [num_sampled, 1]
    sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)

    # True logits: [batch_size, 1]
    true_logits = tf.reduce_sum(tf.multiply(example_emb, true_w), 1) + true_b

    # Sampled logits: [batch_size, num_sampled]
    sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
    sampled_logits = tf.matmul(example_emb,
                               sampled_w,
                               transpose_b=True) + sampled_b_vec
    return true_logits, sampled_logits
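# A sketch (assumed here, not part of the snippet above) of how the two logit tensors
# returned by forward() are typically combined into an NCE loss: true pairs are pushed
# toward label 1 and sampled (negative) pairs toward label 0.
def nce_loss(self, true_logits, sampled_logits):
    opts = self._options
    true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(true_logits), logits=true_logits)
    sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
    # Sum the true and noise (sampled) contributions and average over the batch.
    return (tf.reduce_sum(true_xent) +
            tf.reduce_sum(sampled_xent)) / opts.batch_size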
def positional_encoding(inputs,
                        num_units,
                        zero_pad=True,
                        scale=True,
                        scope="positional_encoding",
                        reuse=None):
    N, T = inputs.get_shape().as_list()
    with tf.variable_scope(scope, reuse=reuse):
        position_ind = tf.tile(tf.expand_dims(tf.range(T), 0), [N, 1])

        # Sinusoid table: angle(pos, i) = pos / 10000^(2i / num_units),
        # with sin applied to even columns and cos to odd columns.
        position_enc = np.array([[
            pos / np.power(10000, 2. * i / num_units) for i in range(num_units)
        ] for pos in range(T)])
        position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])
        position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])

        # Convert to float32 so it can be concatenated with tf.zeros below.
        lookup_table = tf.convert_to_tensor(position_enc, tf.float32)

        if zero_pad:
            lookup_table = tf.concat(
                (tf.zeros(shape=[1, num_units]), lookup_table[1:, :]), 0)
        outputs = tf.nn.embedding_lookup(lookup_table, position_ind)

        if scale:
            outputs = outputs * num_units**0.5
        return outputs
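# Illustrative usage (assumed; TF 1.x graph mode): any integer tensor with a fully
# defined static shape [batch, seq_len] works, since only the shape is used to build
# the sinusoid table.
token_ids = tf.placeholder(tf.int32, shape=[32, 10])
pos_encoding = positional_encoding(token_ids, num_units=512)  # -> [32, 10, 512], float32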
def normalize(inputs, epsilon=1e-8, scope="ln", reuse=None):
    """Layer normalization over the last dimension of `inputs`."""
    with tf.variable_scope(scope, reuse=reuse):
        inputs_shape = inputs.get_shape()
        param_shape = inputs_shape[-1:]

        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        beta = tf.Variable(tf.zeros(param_shape))
        gamma = tf.Variable(tf.ones(param_shape))
        normalized = (inputs - mean) / ((variance + epsilon)**(.5))
        outputs = gamma * normalized + beta
    return outputs
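# A NumPy sketch (assumed, for illustration only) of the same computation, useful for
# checking what normalize() does: standardize over the last axis, then apply a learned
# scale (gamma) and shift (beta).
import numpy as np

def layer_norm_np(x, gamma=1.0, beta=0.0, epsilon=1e-8):
    mean = x.mean(axis=-1, keepdims=True)
    variance = x.var(axis=-1, keepdims=True)
    return gamma * (x - mean) / np.sqrt(variance + epsilon) + beta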
def inference(images, hidden1_units, hidden2_units):
    """Build the MNIST model up to where it may be used for inference.

    Args:
      images: Images placeholder, from inputs().
      hidden1_units: Size of the first hidden layer.
      hidden2_units: Size of the second hidden layer.

    Returns:
      softmax_linear: Output tensor with the computed logits.
    """
    # Hidden 1
    with tf.name_scope('hidden1'):
        weights = tf.Variable(
            # tf.truncated_normal(shape, mean, stddev): shape is the output tensor's
            # shape, mean its mean, and stddev its standard deviation. It samples from
            # a truncated normal distribution: any draw more than two standard
            # deviations from the mean is discarded and re-drawn.
            tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                                stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden1_units]), name='biases')
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
    # Hidden 2
    with tf.name_scope('hidden2'):
        weights = tf.Variable(
            tf.truncated_normal([hidden1_units, hidden2_units],
                                stddev=1.0 / math.sqrt(float(hidden1_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden2_units]), name='biases')
        hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
    # Linear
    with tf.name_scope('softmax_linear'):
        weights = tf.Variable(
            tf.truncated_normal([hidden2_units, NUM_CLASSES],
                                stddev=1.0 / math.sqrt(float(hidden2_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]), name='biases')
        logits = tf.matmul(hidden2, weights) + biases
    return logits
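# A minimal sketch (assumed; the helper names loss/training are illustrative, not part of
# the snippet above) of how the logits returned by inference() are usually consumed:
# a sparse cross-entropy loss on the integer labels and a gradient-descent training op.
def loss(logits, labels):
    labels = tf.cast(labels, tf.int64)
    return tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                       logits=logits))

def training(loss_op, learning_rate):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    return optimizer.minimize(loss_op, global_step=global_step)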
def embedding(inputs,
              vocab_size,
              num_units,
              zero_pad=True,
              scale=True,
              scope="embedding",
              reuse=None):
    with tf.variable_scope(scope, reuse=reuse):
        lookup_table = tf.get_variable(
            'lookup_table',
            dtype=tf.float32,
            shape=[vocab_size, num_units],
            initializer=tf.contrib.layers.xavier_initializer())
        if zero_pad:
            lookup_table = tf.concat(
                (tf.zeros(shape=[1, num_units]), lookup_table[1:, :]), 0)
        outputs = tf.nn.embedding_lookup(lookup_table, inputs)

        if scale:
            outputs = outputs * (num_units**0.5)
    return outputs
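# Illustrative check of zero_pad (assumed usage; TF 1.x graph mode): with zero_pad=True,
# id 0 looks up an all-zero row, so padding tokens contribute nothing downstream.
ids = tf.constant([[0, 5, 7]])
padded_emb = embedding(ids, vocab_size=100, num_units=8,
                       scale=False, scope="embedding_demo")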
            data_index += 1
    # Backtrack a little so we do not skip words at the end of a batch.
    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels

# Input data
X = tf.placeholder(tf.int32, shape=[None])
Y = tf.placeholder(tf.int32, shape=[None, 1])

with tf.device("/cpu:0"):
    embedding = tf.Variable(tf.random_normal([vocab_size, embedding_size]))
    X_embed = tf.nn.embedding_lookup(embedding, X)

    # Construct the variables for the NCE loss
    nce_weights = tf.Variable(tf.random_normal([vocab_size, embedding_size]))
    nce_biases = tf.Variable(tf.zeros([vocab_size]))

# Compute the average NCE loss for the batch
loss_op = tf.reduce_mean(
    tf.nn.nce_loss(weights=nce_weights,
                   biases=nce_biases,
                   labels=Y,
                   inputs=X_embed,
                   num_sampled=num_sampled,
                   num_classes=vocab_size))

# Define the optimizer; it computes gradients and backpropagates them.
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluation
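# A sketch (assumed, not part of the snippet above) of one common way to fill in the
# evaluation step: cosine similarity between the embeddings of the query words in X
# and every word embedding in the vocabulary.
X_embed_norm = X_embed / tf.sqrt(tf.reduce_sum(tf.square(X_embed), 1, keep_dims=True))
embedding_norm = embedding / tf.sqrt(
    tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
cosine_sim_op = tf.matmul(X_embed_norm, embedding_norm, transpose_b=True)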
def word2vec_basic(log_dir):
    # Create the directory for TensorBoard visualizations.
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Step 1: Download the data.
    url = 'http://mattmahoney.net/dc/'

    def maybe_download(filename, expected_bytes, sha256=None):
        local_filename = os.path.join(gettempdir(), filename)
        if not os.path.exists(local_filename):
            local_filename, _ = urllib.request.urlretrieve(
                url + filename, local_filename)
        statinfo = os.stat(local_filename)
        if sha256 and _hash_file(local_filename) != sha256:
            raise Exception('Failed to verify ' + local_filename + ' due to hash '
                            'mismatch. Can you get to it with a browser?')
        if statinfo.st_size == expected_bytes:
            print('Found and verified', filename)
        else:
            print(statinfo.st_size)
            raise Exception('Failed to verify ' + local_filename +
                            '. Can you get to it with a browser?')
        return local_filename

    filename = maybe_download(
        'text8.zip',
        31344016,
        sha256=
        'a6640522afe85d1963ad56c05b0ede0a0c000dddc9671758a6cc09b7a38e5232')

    # Read the data into a list of strings.
    def read_data(filename):
        with zipfile.ZipFile(filename) as f:
            data = tf.compat.as_str(f.read(f.namelist()[0])).split()
        return data

    vocabulary = read_data(filename)
    print('data_size', len(vocabulary))

    # Step 2: Build the dictionary and replace rare words with the UNK token.
    vocabulary_size = 50000

    def build_dataset(words, n_words):
        count = [['UNK', -1]]
        count.extend(collections.Counter(words).most_common(n_words - 1))
        dictionary = {word: index for index, (word, _) in enumerate(count)}
        data = []
        unk_count = 0
        for word in words:
            index = dictionary.get(word, 0)
            if index == 0:  # dictionary['UNK']
                unk_count += 1
            data.append(index)
        count[0][1] = unk_count
        reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
        return data, count, dictionary, reversed_dictionary

    # data: ids of all the words in the corpus
    # count: map from word to occurrence count
    # dictionary: word --> index
    # reversed_dictionary: index --> word
    data, count, dictionary, reversed_dictionary = build_dataset(
        vocabulary, vocabulary_size)
    del vocabulary
    print('Most common words (+UNK)', count[:5])
    print('Sample data', data[:10], [reversed_dictionary[i] for i in data[:10]])

    # Generate training batches for the skip-gram model.
    def generate_batch(batch_size, num_skips, skip_window):
        global data_index
        assert batch_size % num_skips == 0
        assert num_skips <= 2 * skip_window
        batch = np.ndarray(shape=(batch_size), dtype=np.int32)
        labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
        # Span of the window around the target word.
        span = 2 * skip_window + 1
        buffer = collections.deque(maxlen=span)
        if data_index + span > len(data):
            data_index = 0
        buffer.extend(data[data_index:data_index + span])
        # Advance past the window we just buffered.
        data_index += span
        for i in range(batch_size // num_skips):
            context_words = [w for w in range(span) if w != skip_window]
            words_to_use = random.sample(context_words, num_skips)
            for j, context_word in enumerate(words_to_use):
                batch[i * num_skips + j] = buffer[skip_window]
                labels[i * num_skips + j, 0] = buffer[context_word]
            if data_index == len(data):
                buffer.extend(data[0:span])
                data_index = span
            else:
                buffer.append(data[data_index])
                data_index += 1
        # Backtrack a little bit to avoid skipping words in the end of a batch
        data_index = (data_index - span) % len(data)
        return batch, labels

    batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
    for i in range(8):
        print(batch[i], reversed_dictionary[batch[i]], '->', labels[i, 0],
              reversed_dictionary[labels[i, 0]])

    # Build and train the model.
    batch_size = 128
    embedding_size = 128  # Dimension of the embedding vectors.
    skip_window = 1       # How many words to consider to the left and right.
    num_skips = 2         # How many times to reuse an input to generate a label.
    num_sampled = 64      # Number of negative examples to sample.

    # Pick a random validation set to sample nearest neighbors. The validation samples
    # are limited to the words with the lowest ids, which are the most frequent ones.
    # These three variables are only used to report accuracy; they do not affect training.
    valid_size = 16     # Random set of words to evaluate similarity on.
    valid_window = 100
    valid_examples = np.random.choice(valid_window, valid_size, replace=False)
    graph = tf.Graph()

    with graph.as_default():
        # Input data.
        with tf.name_scope('input'):
            train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
            train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
            valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

        # Pin the ops and variables to the CPU.
        with tf.device('/cpu:0'):
            with tf.name_scope('embeddings'):
                embeddings = tf.Variable(
                    tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
                embed = tf.nn.embedding_lookup(embeddings, train_inputs)

            # Construct the variables for the NCE loss.
            with tf.name_scope('weights'):
                nce_weights = tf.Variable(
                    tf.truncated_normal([vocabulary_size, embedding_size],
                                        stddev=1.0 / math.sqrt(embedding_size)))
            with tf.name_scope('biases'):
                nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

        # Compute the average NCE loss for the batch; a new sample of the negative
        # labels is drawn automatically each time the loss is evaluated.
        with tf.name_scope('loss'):
            loss = tf.reduce_mean(
                tf.nn.nce_loss(weights=nce_weights,
                               biases=nce_biases,
                               labels=train_labels,
                               inputs=embed,
                               num_sampled=num_sampled,
                               num_classes=vocabulary_size))
        # Summarize the loss.
        tf.summary.scalar('loss', loss)

        # Construct the SGD optimizer.
        with tf.name_scope('optimizer'):
            optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

        # Compute the cosine similarity between minibatch examples and all embeddings.
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm
        valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings,
                                                  valid_dataset)
        similarity = tf.matmul(valid_embeddings,
                               normalized_embeddings,
                               transpose_b=True)

        # Merge all summaries.
        merged = tf.summary.merge_all()

        init = tf.global_variables_initializer()
        saver = tf.train.Saver()

    # Begin training.
    num_steps = 1000001

    with tf.compat.v1.Session(graph=graph) as session:
        # Writer for the TensorBoard summaries.
        writer = tf.summary.FileWriter(log_dir, session.graph)

        init.run()
        print('Initialized')

        average_loss = 0
        for step in range(num_steps):
            batch_inputs, batch_labels = generate_batch(batch_size, num_skips,
                                                        skip_window)
            feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}

            # Define the run metadata.
            run_metadata = tf.RunMetadata()

            _, summary, loss_val = session.run([optimizer, merged, loss],
                                               feed_dict=feed_dict,
                                               run_metadata=run_metadata)
            average_loss += loss_val

            writer.add_summary(summary, step)
            if step == (num_steps - 1):
                writer.add_run_metadata(run_metadata, 'step%d' % step)

            if step % 2000 == 0:
                if step > 0:
                    average_loss /= 2000
                # The average loss is an estimate over the last 2000 batches.
                print('Average loss at step ', step, ': ', average_loss)
                average_loss = 0

            if step % 10000 == 0:
                sim = similarity.eval()
                for i in range(valid_size):
                    valid_word = reversed_dictionary[valid_examples[i]]
                    top_k = 8
                    nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                    log_str = 'Nearest to %s:' % valid_word
                    print(
                        log_str, ', '.join([
                            reversed_dictionary[nearest[k]] for k in range(top_k)
                        ]))
        final_embeddings = normalized_embeddings.eval()

        # Write the corresponding labels for the embeddings.
        with open(log_dir + '/metadata.tsv', 'w') as f:
            for i in range(vocabulary_size):
                f.write(reversed_dictionary[i] + '\n')

        # Save the model as a checkpoint.
        saver.save(session, os.path.join(log_dir, 'model.ckpt'))

        # Configure TensorBoard's embedding projector.
        config = projector.ProjectorConfig()
        embedding_conf = config.embeddings.add()
        embedding_conf.tensor_name = embeddings.name
        embedding_conf.metadata_path = os.path.join(log_dir, 'metadata.tsv')
        projector.visualize_embeddings(writer, config)

    writer.close()

    # Step 6: Visualize the embeddings.
    # pylint: disable=missing-docstring
    # Function to draw visualization of distance between embeddings.
    def plot_with_labels(low_dim_embs, labels, filename):
        assert low_dim_embs.shape[0] >= len(
            labels), 'More labels than embeddings'
        plt.figure(figsize=(18, 18))  # in inches
        for i, label in enumerate(labels):
            x, y = low_dim_embs[i, :]
            plt.scatter(x, y)
            plt.annotate(label,
                         xy=(x, y),
                         xytext=(5, 2),
                         textcoords='offset points',
                         ha='right',
                         va='bottom')
        plt.savefig(filename)

    try:
        # pylint: disable=g-import-not-at-top
        from sklearn.manifold import TSNE
        import matplotlib.pyplot as plt

        tsne = TSNE(perplexity=30,
                    n_components=2,
                    init='pca',
                    n_iter=5000,
                    method='exact')
        plot_only = 500
        low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
        labels = [reversed_dictionary[i] for i in range(plot_only)]
        plot_with_labels(low_dim_embs, labels,
                         os.path.join(gettempdir(), 'tsne.png'))
    except ImportError as ex:
        print('Please install sklearn, matplotlib, and scipy to show embeddings.')
        print(ex)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.01
train_epochs = 25
batch_size = 100
display_step = 1

# Define placeholders
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Weights and bias
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

pred = tf.nn.softmax(tf.matmul(x, W) + b)
# Minimize the cross-entropy
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initialize all the variables
init = tf.global_variables_initializer()

with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)

# Parameters
learning_rate = 0.1
batch_size = 128
num_steps = 1000
display_step = 100

dataset = tf.data.Dataset.from_tensor_slices(
    (mnist.train.images, mnist.train.labels))
dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)
dataser_iter = tfe.Iterator(dataset)

W = tfe.Variable(tf.zeros([784, 10]), name='weights')
b = tfe.Variable(tf.zeros([10]), name='bias')

# Define the regression function
def logistic_regerssion(inputs):
    return tf.matmul(inputs, W) + b

# Define the loss function; the regression function is passed in as an argument
def loss_fn(interface_fn, inputs, labels):
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=interface_fn(inputs), labels=labels))

# Calculate accuracy
def accuarcy_fn(interface_fn, inputs, labels):
    prediction = tf.nn.softmax(interface_fn(inputs))
    # Check whether the predictions match the labels
    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)
    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# SGD optimizer
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : Leslee
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

print("Downloading the dataset...")
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
print("Download finished.")

# InteractiveSession makes the code more flexible: you can interleave operations
# that build the graph with operations that run it.
sess = tf.InteractiveSession()

# Placeholders
x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])

# Variables; like placeholders, they serve as additional inputs to the graph.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

sess.run(tf.initialize_all_variables())
# Use softmax to compute the probability of each class.
y = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))  # Cross-entropy
# Train with gradient descent, minimizing the cross-entropy loss.
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

init = tf.global_variables_initializer()

for i in range(1000):
    batch = mnist.train.next_batch(50)  # Load a mini-batch of training data
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
print("Training finished.")
"""
This is well put: tf.argmax is a very useful function. It returns the index of the
largest value of a tensor along a given axis.
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.01
train_epochs = 25
batch_size = 100
display_epoch = 1
logs_path = './logs/'

x = tf.placeholder(tf.float32, [None, 784], name='InputData')
y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
W = tf.Variable(tf.zeros([784, 10]), name='Weights')
b = tf.Variable(tf.zeros([10]), name='Bias')

# Build the model and wrap each operation in a name scope so that the graph is easy
# to read in TensorBoard.
with tf.name_scope('Model'):
    pred = tf.nn.softmax(tf.matmul(x, W) + b)
with tf.name_scope('Loss'):
    cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
with tf.name_scope('SGD'):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
with tf.name_scope('Accuracy'):