def build_graph(self):
    """Build the graph for the model."""
    opts = self._options
    # Training data: the skipgram op reads the corpus and emits
    # (center word, context word) pairs in batches.
    (words, counts, words_per_epoch, self._epoch, self._words, examples,
     labels) = word2vec.skipgram(filename=opts.train_data,
                                 batch_size=opts.batch_size,
                                 window_size=opts.window_size,
                                 min_count=opts.min_count,
                                 subsample=opts.subsample)
    (opts.vocab_words, opts.vocab_counts,
     opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
    opts.vocab_size = len(opts.vocab_words)
    print("Data file: ", opts.train_data)
    print("Vocab size: ", opts.vocab_size - 1, " + UNK")
    print("Words per epoch: ", opts.words_per_epoch)

    self._examples = examples
    self._labels = labels
    self._id2word = opts.vocab_words
    for i, w in enumerate(self._id2word):
        self._word2id[w] = i

    true_logits, sampled_logits = self.forward(examples, labels)
    loss = self.nce_loss(true_logits, sampled_logits)
    tf.summary.scalar("NCE loss", loss)
    self._loss = loss
    self.optimize(loss)

    tf.initialize_all_variables().run()
    self.saver = tf.train.Saver()
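# The nce_loss method called above is not shown in this snippet. Below is a
# minimal sketch of a noise-contrastive estimation loss consistent with the
# forward() outputs used here; the exact implementation in the source may
# differ. It treats the true (center, context) pairs as positive examples
# (target 1) and the sampled noise words as negatives (target 0), then
# averages the summed cross-entropy over the batch.
def nce_loss(self, true_logits, sampled_logits):
    """Sketch: build the graph for the NCE loss (assumed formulation)."""
    opts = self._options
    # Cross-entropy of true pairs against a target of 1.
    true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(true_logits), logits=true_logits)
    # Cross-entropy of sampled noise pairs against a target of 0.
    sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
    # Sum both terms and average per training example in the batch.
    return (tf.reduce_sum(true_xent) +
            tf.reduce_sum(sampled_xent)) / opts.batch_size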
def main(unused_args):
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")

    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data

    config = get_config()
    eval_config = get_config()
    # Evaluate the test set one word at a time.
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = PTBModel(is_training=True, config=config)
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mvalid = PTBModel(is_training=False, config=config)
            mtest = PTBModel(is_training=False, config=eval_config)

        tf.initialize_all_variables().run()

        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(session, m, train_data, m.train_op,
                                         verbose=True)
            print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
            valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
            print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

        test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
        print("Test Perplexity: %.3f" % test_perplexity)
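# run_epoch is referenced above but not defined in this snippet. A minimal
# sketch follows, assuming the PTBModel attributes used in the classic PTB
# tutorial (initial_state, cost, final_state, input_data, targets,
# batch_size, num_steps) and a reader.ptb_iterator that yields (x, y)
# minibatches; the actual implementation may differ.
import time
import numpy as np

def run_epoch(session, m, data, eval_op, verbose=False):
    """Sketch: run the model over the data once and return perplexity."""
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = m.initial_state.eval()
    for step, (x, y) in enumerate(
            reader.ptb_iterator(data, m.batch_size, m.num_steps)):
        # Feed the previous final state back in so the RNN state is
        # carried across minibatches within the epoch.
        cost, state, _ = session.run(
            [m.cost, m.final_state, eval_op],
            {m.input_data: x, m.targets: y, m.initial_state: state})
        costs += cost
        iters += m.num_steps
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))
    # Perplexity is the exponential of the average per-word cross entropy.
    return np.exp(costs / iters)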
# @Author : Leslee
# @Email : [email protected]
# @Time : 2019.11.14 14:49
from com.hiekn.tensorflow.Mnist_TF_work_1_2 import input_data
import tensorflow as tf

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
sess = tf.InteractiveSession()

x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.initialize_all_variables())

# Softmax regression: a single linear layer followed by softmax.
y = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

for i in range(1000):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
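# Note: the hand-rolled cross entropy above can produce NaNs once a
# predicted probability underflows to exactly 0 (log(0)). A numerically
# stabler variant, sketched below, applies softmax and log in one fused op
# on the raw logits instead:
logits = tf.matmul(x, W) + b
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)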
import numpy as np
import PIL.Image
import matplotlib.pyplot as plt
import tensorflow as tf

def DisplayFractal(a):
    """Render an array of escape-time iteration counts as a color image."""
    # Map iteration counts onto a cyclic color palette.
    a_cyclic = (6.28 * a / 20.0).reshape(list(a.shape) + [1])
    img = np.concatenate([10 + 20 * np.cos(a_cyclic),
                          30 + 50 * np.sin(a_cyclic),
                          155 - 80 * np.cos(a_cyclic)], 2)
    # Points that never diverged are painted black.
    img[a == a.max()] = 0
    a = img
    a = np.uint8(np.clip(a, 0, 255))
    img1 = PIL.Image.fromarray(a)
    plt.imsave("image_tf.png", a)
    plt.imshow(img1)
    plt.show()

sess = tf.InteractiveSession()

# Complex grid covering the region of the plane to render.
Y, X = np.mgrid[-1.3:1.3:0.005, -2:1:0.005]
Z = X + 1j * Y

xs = tf.constant(Z.astype("complex64"))
zs = tf.Variable(xs)
ns = tf.Variable(tf.zeros_like(xs, "float32"))
tf.initialize_all_variables().run()

# One escape-time step: z <- z^2 + c, and count each point that has not
# yet diverged past |z| < 4.
zs_ = zs * zs + xs
not_diverged = tf.abs(zs_) < 4
step = tf.group(zs.assign(zs_),
                ns.assign_add(tf.cast(not_diverged, "float32")))

for i in range(200):
    step.run()

DisplayFractal(ns.eval())
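# For reference, the same escape-time iteration in plain NumPy (a sketch;
# ns_np is a name introduced here, not part of the original script). It
# mirrors the TensorFlow loop above: square-and-add each grid point, and
# count how many steps it stays inside |z| < 4. Diverged points overflow
# harmlessly, so overflow warnings are suppressed.
z = Z.copy()
ns_np = np.zeros(Z.shape, dtype=np.float32)
with np.errstate(over="ignore", invalid="ignore"):
    for _ in range(200):
        z = z * z + Z                 # z <- z^2 + c
        ns_np += (np.abs(z) < 4)      # increment not-yet-diverged counts
DisplayFractal(ns_np)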