import tensorflow as tf

def tuozhan():
    """Extension."""
    large_array = tf.random_uniform([2, 3], minval=0, maxval=10, seed=0)
    # We often need large constant tensors; in that case, to save memory,
    # it is best to declare them as variables with the trainable flag set
    # to False.
    t_large = tf.Variable(large_array, trainable=False)
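# A minimal usage sketch (assumes TF 1.x graph mode): a trainable=False
# variable initializes like any other, but it is excluded from the
# collection returned by tf.trainable_variables(), so optimizers that
# default to that collection never update it.
import tensorflow as tf

t_large = tf.Variable(tf.random_uniform([2, 3], 0, 10, seed=0),
                      trainable=False)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(t_large))           # concrete values
    print(tf.trainable_variables())    # t_large is not listed here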
def _initialize_weights(self):
    weights = dict()

    # embeddings
    weights["feature_embeddings"] = tf.Variable(
        tf.random_normal([self.feature_size, self.embedding_size], 0, 0.01),
        name='feature_embeddings')  # feature_embeddings: f * K
    weights["feature_bias"] = tf.Variable(
        tf.random_uniform([self.feature_size, 1], 0.0, 1.0),
        name='feature_bias')

    # deep layers
    num_layer = len(self.deep_layers)  # number of hidden DNN layers
    input_size = self.field_size * self.embedding_size
    # Glorot and Bengio (2010) recommend normalized initialization.
    glorot = np.sqrt(2.0 / (input_size + self.deep_layers[0]))
    weights["layer_0"] = tf.Variable(
        np.random.normal(loc=0, scale=glorot, size=(input_size, self.deep_layers[0])),
        dtype=np.float32)
    weights['bias_0'] = tf.Variable(
        np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[0])),
        dtype=np.float32)
    for i in range(1, num_layer):
        glorot = np.sqrt(2.0 / (self.deep_layers[i - 1] + self.deep_layers[i]))
        weights['layer_%d' % i] = tf.Variable(
            np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[i - 1], self.deep_layers[i])),
            dtype=np.float32)
        weights['bias_%d' % i] = tf.Variable(
            np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[i])),
            dtype=np.float32)

    # final concat projection layer
    if self.use_fm and self.use_deep:
        input_size = self.field_size + self.embedding_size + self.deep_layers[-1]
    elif self.use_fm:
        input_size = self.field_size + self.embedding_size
    elif self.use_deep:
        input_size = self.deep_layers[-1]
    weights["concat_projection"] = tf.Variable(
        np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),
        dtype=np.float32)  # layers[i-1] * layers[i]
    weights["concat_bias"] = tf.Variable(tf.constant(0.01), dtype=np.float32)

    return weights
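# The hand-rolled draws above implement Glorot-normal initialization,
# stddev = sqrt(2 / (fan_in + fan_out)). A sketch of an equivalent layer
# weight using TF's built-in initializer (tf.glorot_normal_initializer in
# TF 1.x; the sizes here are hypothetical, just for illustration):
import tensorflow as tf

input_size, layer_0_size = 312, 32  # hypothetical fan_in / fan_out
layer_0_alt = tf.get_variable(
    "layer_0_alt", shape=(input_size, layer_0_size),
    initializer=tf.glorot_normal_initializer())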
def convolutional_neural_network(x):
    weights = {'W_conv1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
               'W_conv2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
               'W_fc': tf.Variable(tf.random_normal([7 * 7 * 64, 1024])),
               'out': tf.Variable(tf.random_normal([1024, n_classes]))}
    biases = {'b_conv1': tf.Variable(tf.random_normal([32])),
              'b_conv2': tf.Variable(tf.random_normal([64])),
              'b_fc': tf.Variable(tf.random_normal([1024])),
              'out': tf.Variable(tf.random_normal([n_classes]))}

    x = tf.reshape(x, shape=[-1, 28, 28, 1])

    conv1 = tf.nn.relu(conv2d(x, weights['W_conv1']) + biases['b_conv1'])
    conv1 = maxpool2d(conv1)

    conv2 = tf.nn.relu(conv2d(conv1, weights['W_conv2']) + biases['b_conv2'])
    conv2 = maxpool2d(conv2)

    fc = tf.reshape(conv2, [-1, 7 * 7 * 64])
    fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])

    out = tf.matmul(fc, weights['out']) + biases['out']
    return out
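# The function above assumes conv2d and maxpool2d helpers. A minimal sketch
# consistent with its shapes (SAME padding with two 2x2 pools takes
# 28x28 -> 14x14 -> 7x7, matching the 7*7*64 fully connected input):
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def maxpool2d(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME')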
def textcnn(self, inputs, n_step, filter_sizes, embed_size):
    inputs_expand = tf.expand_dims(inputs, -1)  # N * 30 * 256 * 1
    pooled_outputs = []
    for i, filter_size in enumerate(filter_sizes):
        with tf.name_scope('conv_max_%s' % filter_size):
            # The last dimension of the filter shape is the number of filters.
            filter_shape = [filter_size, embed_size, 1, self.n_filter]
            W_filter = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1),
                                   name='W_filter')
            beta = tf.Variable(tf.constant(0.1, tf.float32, shape=[self.n_filter]),
                               name='beta')
            tf.summary.histogram('beta', beta)
            # The three CNN steps: convolution (linear), (BN +) activation
            # (non-linear), and pooling (keep the strongest feature).
            conv = tf.nn.conv2d(inputs_expand, W_filter, strides=[1, 1, 1, 1],
                                padding='VALID', name='conv')
            conv_bn, update_ema = self.batchnorm(conv, beta, convolutional=True)
            h = tf.nn.relu(conv_bn, name='relu')
            pooled = tf.nn.max_pool(h, ksize=[1, n_step - filter_size + 1, 1, 1],
                                    strides=[1, 1, 1, 1], padding='VALID',
                                    name='max_pool')
            pooled_outputs.append(pooled)  # N * 1 * 1 * n_filter
            self.update_emas.append(update_ema)
    h_pool = tf.concat(pooled_outputs, 3)  # N * 1 * 1 * (n_filter * len(filter_sizes))
    n_filter_total = self.n_filter * len(filter_sizes)
    h_pool_flat = tf.reshape(h_pool, [-1, n_filter_total])
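# textcnn assumes a self.batchnorm helper that returns the normalized tensor
# plus an op that updates moving averages. A minimal training-time-only
# sketch of such a helper (an assumption, not the author's code; using the
# stored moving averages at test time is omitted for brevity):
def batchnorm(Ylogits, offset, convolutional=False):
    exp_moving_avg = tf.train.ExponentialMovingAverage(0.999)
    # For conv layers, average over batch, height, and width; else over batch.
    mean, variance = tf.nn.moments(Ylogits, [0, 1, 2] if convolutional else [0])
    update_moving_averages = exp_moving_avg.apply([mean, variance])
    Ybn = tf.nn.batch_normalization(Ylogits, mean, variance,
                                    offset, None, variance_epsilon=1e-5)
    return Ybn, update_moving_averages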
# Build a sample fitting graph.
# Almost all data in TensorFlow is float32.
from __future__ import print_function
import tensorflow as tf
import numpy as np

## Set up the data and structure
# Create data
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Create the TensorFlow structure
Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))
y = Weights * x_data + biases

learning_rate = 0.1
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(loss)

## Run the computation
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in range(200):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(Weights), sess.run(biases))
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

w = tf.Variable(tf.zeros([2, 1]), name="weights")
b = tf.Variable(0., name="bias")

weight_age = [[84, 46], [73, 20], [65, 52], [70, 30], [76, 57],
              [69, 25], [63, 28], [72, 36], [79, 57], [75, 44],
              [27, 24], [89, 31], [65, 52], [57, 23], [59, 60],
              [69, 48], [60, 34], [79, 51], [75, 50], [82, 34],
              [59, 46], [67, 23], [85, 37], [55, 40], [63, 30]]
blood_fat_content = [354, 190, 405, 263, 451, 302, 288, 385, 402, 365,
                     209, 290, 346, 254, 395, 434, 220, 374, 308, 220,
                     311, 181, 274, 303, 244]

# Linear model: predicted blood fat = weight_age . w + b
X = tf.to_float(weight_age)                # 25 x 2
Y = tf.to_float(blood_fat_content)         # 25
y_pred = tf.squeeze(tf.matmul(X, w) + b)   # 25
loss = tf.reduce_sum(tf.squared_difference(Y, y_pred))
trainprocess = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
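# A minimal training-loop sketch for the graph above (assumes TF 1.x graph
# mode; the step count is an arbitrary choice for illustration):
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        _, l = sess.run([trainprocess, loss])
        if step % 100 == 0:
            print(step, l)
    print(sess.run(w), sess.run(b))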
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

DATA_DIR = '/tmp/data'
NUM_STEPS = 1000
MINIBATCH_SIZE = 100

data = input_data.read_data_sets(DATA_DIR, one_hot=True)

# Use placeholders and variables.
# 784 is a 28x28 image flattened into one vector; None leaves the number
# of images per batch unspecified.
X = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
y_true = tf.placeholder(tf.float32, [None, 10])
y_pred = tf.matmul(X, W)

# cross_entropy is the model's loss function.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y_pred, labels=y_true))

# A learning rate of 0.5 controls how fast gradient descent changes the weights.
gd_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

correct_mask = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
accuracy = tf.reduce_mean(tf.cast(correct_mask, tf.float32))

with tf.Session() as sess:
    # Train
    sess.run(tf.global_variables_initializer())
    for _ in range(NUM_STEPS):
        batch_xs, batch_ys = data.train.next_batch(MINIBATCH_SIZE)
        sess.run(gd_step, feed_dict={X: batch_xs, y_true: batch_ys})
    # Test
    ans = sess.run(accuracy, feed_dict={X: data.test.images,
                                        y_true: data.test.labels})
    print("Accuracy: {:.4}%".format(ans * 100))
import tensorflow as tf
import numpy as np

# Input data (100 phony data points).
x_data = np.float32(np.random.rand(2, 100))
y_data = np.dot([0.1, 0.2], x_data) + 0.3

# Construct a linear model.
b = tf.Variable(tf.zeros([1]))
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
y = tf.matmul(W, x_data) + b

# Minimize the squared error with gradient descent.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

# Initialize the variables.
init = tf.global_variables_initializer()

# Launch the graph.
sess = tf.Session()
sess.run(init)

# Train: fit the plane.
for step in range(0, 200):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(W), sess.run(b))
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
# Build our model.
graph = tf.Graph()
with graph.as_default():
    global_step = tf.Variable(0)
    data = tf.placeholder(tf.float32, [batch_size, len_per_section, char_size])
    labels = tf.placeholder(tf.float32, [batch_size, char_size])

    # Input gate, output gate, forget gate, internal state --
    # they will be calculated in vacuums.
    # This is the low level right now: we have neural nets inside of neural
    # nets, neuralceptions basically.
    # Each gate has weights for the input, weights for the previous output,
    # and a bias vector.
    # input gate
    w_ii = tf.Variable(tf.truncated_normal([char_size, hidden_nodes], -0.1, 0.1))
    w_io = tf.Variable(tf.truncated_normal([hidden_nodes, hidden_nodes], -0.1, 0.1))
    b_i = tf.Variable(tf.zeros([1, hidden_nodes]))
    # forget gate
    w_fi = tf.Variable(tf.truncated_normal([char_size, hidden_nodes], -0.1, 0.1))
    w_fo = tf.Variable(tf.truncated_normal([hidden_nodes, hidden_nodes], -0.1, 0.1))
    b_f = tf.Variable(tf.zeros([1, hidden_nodes]))
    # output gate
    w_oi = tf.Variable(tf.truncated_normal([char_size, hidden_nodes], -0.1, 0.1))
    w_oo = tf.Variable(tf.truncated_normal([hidden_nodes, hidden_nodes], -0.1, 0.1))
    b_o = tf.Variable(tf.zeros([1, hidden_nodes]))
    # memory cell
    w_ci = tf.Variable(tf.truncated_normal([char_size, hidden_nodes], -0.1, 0.1))
    w_co = tf.Variable(tf.truncated_normal([hidden_nodes, hidden_nodes], -0.1, 0.1))
    b_c = tf.Variable(tf.zeros([1, hidden_nodes]))
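# A sketch of the LSTM step these parameters feed. The equations are the
# standard LSTM gate updates; the function itself is an assumption, not
# part of the snippet above:
def lstm(i, o, state):
    # i: current input, o: previous output, state: previous cell state
    input_gate = tf.sigmoid(tf.matmul(i, w_ii) + tf.matmul(o, w_io) + b_i)
    forget_gate = tf.sigmoid(tf.matmul(i, w_fi) + tf.matmul(o, w_fo) + b_f)
    output_gate = tf.sigmoid(tf.matmul(i, w_oi) + tf.matmul(o, w_oo) + b_o)
    memory_cell = tf.tanh(tf.matmul(i, w_ci) + tf.matmul(o, w_co) + b_c)
    state = forget_gate * state + input_gate * memory_cell
    output = output_gate * tf.tanh(state)
    return output, state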
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# convolution
def conv2d(x, w):
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

# pooling
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME')

# create model
x = tf.placeholder("float", [None, 1577])
y_ = tf.placeholder("float", [None, 1])
w = tf.Variable(tf.zeros([1577, 1]))
b = tf.Variable(tf.zeros([1]))
y = tf.nn.softmax(tf.matmul(x, w) + b)

# convolutional layer
# Note: the reshape below assumes 784-dimensional (28x28) inputs, which is
# inconsistent with the 1577-dimensional placeholder above.
w_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
w_conv2 = weight_variable([5, 5, 32, 64])