def _initialize_weights(self):
     all_weights=dict()
     all_weights['w1']=tf.Variable(xavier_init(self.n_input,self.n_hidden))
     all_weights['b1']=tf.Variable(tf.zeros([self.n_hidden],dtype=tf.float32))
     all_weights['w2']=tf.Variable(tf.zeros([self.n_hidden,self.n_input],dtype=tf.float32))
     all_weights['b2']=tf.Variable(tf.zeros([self.n_input],dtype=tf.float32))
     return all_weights
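Note: xavier_init is not defined in this snippet. A minimal sketch of the Glorot-uniform helper it presumably refers to (an assumption, not code from the original project):

import numpy as np
import tensorflow as tf

def xavier_init(fan_in, fan_out, constant=1):
    # Glorot/Xavier uniform initializer (assumed helper, not from the source)
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32)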
Code example #2
def createGraph():

    # first convolutional layer and its bias vector
    W_conv1 = tf.Variable(tf.zeros([8, 8, 4, 32]))
    b_conv1 = tf.Variable(tf.zeros([32]))

    # second convolutional layer
    W_conv2 = tf.Variable(tf.zeros([4, 4, 32, 64]))
    b_conv2 = tf.Variable(tf.zeros([64]))

    # third convolutional layer
    W_conv3 = tf.Variable(tf.zeros([3, 3, 64, 64]))
    b_conv3 = tf.Variable(tf.zeros([64]))

    # fourth (fully connected) layer; the bias must match the ACTIONS-wide output
    W_fc4 = tf.Variable(tf.zeros([784, ACTIONS]))
    b_fc4 = tf.Variable(tf.zeros([ACTIONS]))
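Note that initializing every weight to zero leaves all units in a layer identical, so a network built this way cannot break symmetry during training; DQN-style graphs normally draw small random initial values instead, for example:

W_conv1 = tf.Variable(tf.truncated_normal([8, 8, 4, 32], stddev=0.01))
b_conv1 = tf.Variable(tf.constant(0.01, shape=[32]))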
Code example #3
File: FFM_model.py Project: baichenjing/MLTEST
def inference(input_x, input_x_field, zeroWeights, oneDimWeights, thirdWeight):
    secondValue = tf.reduce_sum(
        tf.multiply(oneDimWeights, input_x, name='secondValue'))
    firstTwoValue = tf.add(zeroWeights, secondValue, name='firstTwoValue')

    thirdValue = tf.Variable(0.0, dtype=tf.float32)
    input_shape = input_x_size

    for i in range(input_shape):
        featureIndex1 = i
        fieldIndex1 = int(input_x_field[i])
        for j in range(i + 1, input_shape):
            featureIndex2 = j
            fieldIndex2 = int(input_x_field[j])
            vectorLeft = tf.convert_to_tensor([[featureIndex1, fieldIndex2, k]
                                               for k in range(vector_dimension)
                                               ])
            weightLeft = tf.gather_nd(thirdWeight, vectorLeft)
            weightLeftAfterCut = tf.squeeze(weightLeft)

            vectorRight = tf.convert_to_tensor(
                [[featureIndex2, fieldIndex1, k]
                 for k in range(vector_dimension)])
            weightRight = tf.gather_nd(thirdWeight, vectorRight)
            weightRightAfterCut = tf.squeeze(weightRight)

            tempValue = tf.reduce_sum(
                tf.multiply(weightLeftAfterCut, weightRightAfterCut))
            indices2 = [i]
            indices3 = [j]

            xi = tf.squeeze(tf.gather_nd(input_x, indices2))
            xj = tf.squeeze(tf.gather_nd(input_x, indices3))

            product = tf.reduce_sum(tf.multiply(xi, xj))
            secondItemVal = tf.multiply(tempValue, product)

            thirdValue = tf.add(thirdValue, secondItemVal)  # accumulate directly; the original tf.assign op was never run
    return tf.add(firstTwoValue, thirdValue)
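For reference, a minimal NumPy sketch of the pairwise FFM term the nested loops above build, assuming thirdWeight has shape [n_features, n_fields, vector_dimension]:

import numpy as np

def ffm_pairwise(x, field, V):
    # sum over i < j of <V[i, field[j]], V[j, field[i]]> * x[i] * x[j]
    total = 0.0
    for i in range(len(x)):
        for j in range(i + 1, len(x)):
            total += np.dot(V[i, int(field[j])], V[j, int(field[i])]) * x[i] * x[j]
    return total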
Code example #4
def textcnn(self, inputs, n_step, filter_sizes, embed_size):
    inputs_expand = tf.expand_dims(inputs, -1)  # N*30*256*1
    pooled_outputs = []
    for i, filter_size in enumerate(filter_sizes):
        with tf.name_scope('conv_max_%s' % filter_size):
            # the last dimension of the filter shape is the number of filters
            filter_shape = [filter_size, embed_size, 1, self.n_filter]
            W_filter = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='W_filter')
            beta = tf.Variable(tf.constant(0.1, tf.float32, shape=[self.n_filter]), name='beta')
            tf.summary.histogram('beta', beta)
            # the CNN three-step: convolution (linear), (BN then) activation
            # (non-linear), pooling (keep the strongest feature)
            conv = tf.nn.conv2d(inputs_expand, W_filter, strides=[1, 1, 1, 1], padding='VALID', name='conv')
            conv_bn, update_ema = self.batchnorm(conv, beta, convolutional=True)
            h = tf.nn.relu(conv_bn, name='relu')

            pooled = tf.nn.max_pool(h, ksize=[1, n_step - filter_size + 1, 1, 1], strides=[1, 1, 1, 1],
                                    padding='VALID', name='max_pool')
            pooled_outputs.append(pooled)  # N*1*1*n_filter
            self.update_emas.append(update_ema)
    h_pool = tf.concat(pooled_outputs, 3)  # N*1*1*(n_filter*len(filter_sizes))
    n_filter_total = self.n_filter * len(filter_sizes)
    h_pool_flat = tf.reshape(h_pool, [-1, n_filter_total])
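A quick standalone check of the shape annotations, assuming n_step=30, embed_size=256, a single filter size of 3 and n_filter=128 (illustrative values matching the N*30*256 comment, not taken from the class):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 30, 256])                    # N*30*256
x4 = tf.expand_dims(x, -1)                                         # N*30*256*1
w = tf.Variable(tf.truncated_normal([3, 256, 1, 128], stddev=0.1))
conv = tf.nn.conv2d(x4, w, strides=[1, 1, 1, 1], padding='VALID')  # N*28*1*128
pool = tf.nn.max_pool(conv, ksize=[1, 30 - 3 + 1, 1, 1],
                      strides=[1, 1, 1, 1], padding='VALID')       # N*1*1*128
print(pool.get_shape())  # (?, 1, 1, 128)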
Code example #5
def glove_implementation(embedding_dim):

    input_data, vocabulary_size = cooccurrence_matrix()

    X = tf.placeholder(tf.float32, shape=(None, vocabulary_size))  # vocabulary-sized rows, so the matmul with the [vocabulary_size, embedding_dim] input weights is valid
    Y = tf.placeholder(tf.float32, shape=(None, vocabulary_size))

    weights = {
        'input': tf.Variable(tf.random_normal([vocabulary_size,
                                               embedding_dim])),
        'output':
        tf.Variable(tf.random_normal([embedding_dim, vocabulary_size]))
    }

    biases = {
        'input': tf.Variable(tf.random_normal([embedding_dim])),
        'output': tf.Variable(tf.random_normal([vocabulary_size]))
    }

    #Defining tensorflow operations for GloVe
    input_layer = tf.add(tf.matmul(X, weights['input']), biases['input'])
    output_layer = tf.add(tf.matmul(input_layer, weights['output']), biases['output'])
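    # (the snippet stops before the objective; a hedged sketch of a GloVe-style
    # weighted least-squares loss it could feed, using the paper's default cap
    # of 100 and exponent 0.75; these lines are assumptions, not from the source)
    weighting = tf.minimum(1.0, tf.pow(Y / 100.0, 0.75))
    loss = tf.reduce_sum(weighting * tf.square(output_layer - tf.log(Y + 1.0)))
    optimizer = tf.train.AdamOptimizer(0.001).minimize(loss)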
Code example #6
File: FFM_model.py Project: baichenjing/MLTEST
def createZeroDiemensionWeight():
    weights = tf.truncated_normal([1])
    tf_weights = tf.Variable(weights)
    return tf_weights
Code example #7
File: FFM_model.py Project: baichenjing/MLTEST
            secondItemVal = tf.multiply(tempValue, product)

            thirdValue = tf.add(thirdValue, secondItemVal)  # accumulate directly; the original tf.assign op was never run
    return tf.add(firstTwoValue, thirdValue)


def gen_data():
    labels = [-1, 1]
    y = [np.random.choice(labels, 1)[0] for _ in range(all_data_size)]
    x_field = [i // 10 for i in range(input_x_size)]
    x = np.random.randint(0, 2, size=(all_data_size, input_x_size))
    return x, y, x_field


if __name__ == '__main__':
    global_step = tf.Variable(0, trainable=False)
    trainx, trainy, trainx_field = gen_data()

    input_x = tf.placeholder(tf.float32, [input_x_size])
    input_y = tf.placeholder(tf.float32)

    lambda_w = tf.constant(0.001, name='lambda_w')
    lambda_v = tf.constant(0.001, name='lambda_v')

    zeroWeights = createZeroDiemensionWeight()
    oneDimWeights = createOneDimensionWeight(input_x_size)
    thirdWeight = createTwoDimensionWeight(input_x_size, field_size,
                                           vector_dimension)
    y_ = inference(input_x, trainx_field, zeroWeights, oneDimWeights,
                   thirdWeight)
    l2_norm = tf.reduce_sum(
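        # (truncated in the listing; an assumed completion that pairs lambda_w
        # with the linear weights and lambda_v with the field-aware vectors)
        tf.add(tf.multiply(lambda_w, tf.pow(oneDimWeights, 2)),
               tf.reduce_sum(tf.multiply(lambda_v, tf.pow(thirdWeight, 2)),
                             axis=[1, 2])))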
Code example #8
        'users': test['user'].values,
        'items': test['item'].values
    }, ix, x_train.shape[0])

y_train = train['rating'].values
y_test = test['rating'].values

x_train = x_train.todense()
x_test = x_test.todense()

n, p = x_train.shape
k = 10
x = tf.placeholder('float', [None, p])
y = tf.placeholder('float', [None, 1])
w0 = tf.Variable(tf.zeros([1]))
w = tf.Variable(tf.random_normal([p], mean=0, stddev=0.01))  # one linear weight per feature, so it broadcasts against x below

v = tf.Variable(tf.random_normal([k, p], mean=0, stddev=0.01))  # pairwise factor matrix

linear_terms = tf.add(w0, tf.reduce_sum(tf.multiply(w, x), 1, keep_dims=True))

pair_interactions = 0.5 * tf.reduce_sum(tf.subtract(
    tf.pow(tf.matmul(x, tf.transpose(v)), 2),
    tf.matmul(tf.pow(x, 2), tf.transpose(tf.pow(v, 2)))),
                                        axis=1,
                                        keep_dims=True)
y_hat = tf.add(linear_terms, pair_interactions)

lambda_w = tf.constant(0.001, name='lambda_w')
lambda_v = tf.constant(0.001, name='lambda_v')
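For intuition: the pair_interactions line above uses the factorization-machine identity that reduces the O(p^2) pairwise sum to O(kp) work. A small standalone NumPy check of that identity (illustrative values, not from the source):

import numpy as np

rng = np.random.default_rng(0)
x1 = rng.normal(size=5)                 # one sample, p = 5
v1 = rng.normal(size=(3, 5))            # factor matrix, k = 3
fast = 0.5 * np.sum(np.dot(x1, v1.T) ** 2 - np.dot(x1 ** 2, (v1 ** 2).T))
slow = sum(np.dot(v1[:, i], v1[:, j]) * x1[i] * x1[j]
           for i in range(5) for j in range(i + 1, 5))
assert np.isclose(fast, slow)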
Code example #9
learning_rate=0.001
training_epochs=20
batch_size=256
display_step=1

n_input=784
X=tf.placeholder('float',[None,n_input])

n_hidden_1=2
n_hidden_2=10
n_hidden_3=64
n_hidden_4=128
weights={
    'encoder_h1':tf.Variable(tf.random_normal([n_input,n_hidden_1])),
    'encoder_h2':tf.Variable(tf.random_normal([n_hidden_1,n_hidden_2])),
    'encoder_h3':tf.Variable(tf.random_normal([n_hidden_2,n_hidden_3])),
    'encoder_h4':tf.Variable(tf.random_normal([n_hidden_3,n_hidden_4])),

    'decoder_h1':tf.Variable(tf.random_normal([n_hidden_4,n_hidden_3])),
    'decoder_h2':tf.Variable(tf.random_normal([n_hidden_3,n_hidden_2])),
    'decoder_h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h4': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}

biases={
    'encoder_b1':tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2':tf.Variable(tf.random_normal([n_hidden_2])),
    'encoder_b3':tf.Variable(tf.random_normal([n_hidden_3])),
    'encoder_b4':tf.Variable(tf.random_normal([n_hidden_4])),
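    # (the listing truncates here; the decoder biases presumably mirror the
    # encoder ones, an assumed completion based on the decoder weights above)
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_3])),
    'decoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b3': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b4': tf.Variable(tf.random_normal([n_input])),
}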
Code example #10

    """XXXXXXXX"""
    #load_Model
    with tf.Session() as sess:
        tf.initialize_all_variables().run()

        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print(ckpt.model_checkpoint_path)
            saver.restore(sess,ckpt.model_checkpoint_path)

    # load_Graph

    # write to txt
    v = tf.Variable(0, name="my_variable")
    with tf.Session() as sess:
        # write binary so ParseFromString below can read it back
        tf.train.write_graph(sess.graph_def, "/tmp/tf/tfmodel", "train.pbtxt", as_text=False)
    # read.
    with tf.Session() as sess:
        with gfile.FastGFile("/tmp/tf/tfmodel/train.pbtxt", "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            sess.graph.as_default()
            tf.import_graph_def(graph_def, name="tfgraph")


with g4.as_default() :
    """QUEUE  队列 """
    # 1:FIFOQueue 先进先出
    def fifoQ():
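        # (body truncated in the source; a minimal TF1 FIFOQueue sketch,
        # assumed for illustration rather than taken from the original)
        q = tf.FIFOQueue(3, "float")
        init = q.enqueue_many(([0.1, 0.2, 0.3],))
        x = q.dequeue()
        q_inc = q.enqueue([x + 1])
        with tf.Session() as sess:
            sess.run(init)
            for _ in range(3):
                sess.run(q_inc)                 # dequeue one element, add 1, re-enqueue
            print(sess.run(q.dequeue_many(3)))  # approximately [1.1, 1.2, 1.3]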
Code example #11
batch_size = 128
display_step = 100

# Network parameters
n_hidden_1 =  256 # 1st layer number of neurons
n_hidden_2 = 256  # 2nd layer number of neurons
num_input = 784 # MNIST data input (28*28 pixels)
num_classes = 10 # MNIST total classes (digits 0-9)

#tf Graph input
X = tf.placeholder("float",[None,num_input])
Y = tf.placeholder("float",[None,num_classes])

# store layers weight and biases
weights = {
    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'h3': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
}

biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'b3': tf.Variable(tf.random_normal([num_classes]))
}


#Create Model

def neural_net(X):
    #Hidden fully connected layer with 256 neurons
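    # (the snippet is truncated here; what follows is an assumed completion
    # based on the weights/biases dicts above, not the project's original code)
    layer_1 = tf.add(tf.matmul(X, weights['h1']), biases['b1'])
    # Second hidden fully connected layer with 256 neurons
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    # Output fully connected layer with one logit per class
    out_layer = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    return out_layer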