Example #1
    def forward(self, trainer):
        w_h = melt.init_weights([trainer.num_features, FLAGS.hidden_size])  # create symbolic variables
        w_o = melt.init_weights([FLAGS.hidden_size, 1])

        py_x = self.model(trainer.X, w_h, w_o)
        return py_x
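Examples #1 through #4 lean on melt.init_weights and melt.init_bias, whose definitions are not shown. A minimal sketch of what such helpers usually wrap in this TensorFlow-0.x style of code (an assumption for illustration, not melt's actual implementation):

import tensorflow as tf

# Hypothetical stand-ins for the melt helpers used in these examples (assumed):
def init_weights(shape, stddev=0.01, name=None):
    # trainable weight tensor with small random-normal initialization
    return tf.Variable(tf.random_normal(shape, stddev=stddev), name=name)

def init_bias(shape, value=0.1, name=None):
    # trainable bias vector starting at a small constant
    return tf.Variable(tf.constant(value, shape=shape), name=name)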
Example #2
    def forward(self, trainer):
        w_h = melt.init_weights([trainer.num_features, self.hidden_size], name='w_h')  # create symbolic variables
        b_h = melt.init_bias([1], name='b_h')
        w_o = melt.init_weights([self.hidden_size, 1], name='w_o')
        # self.weight = w_o  # keeping the Variable as an attribute breaks cPickle dumping
        b_o = melt.init_bias([1], name='b_o')
        py_x = self.model(trainer.X, w_h, b_h, w_o, b_o)
        return py_x
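self.model itself is not part of the snippet; a one-hidden-layer network consistent with the (X, w_h, b_h, w_o, b_o) call above might look like this (a hedged sketch, not melt's actual model):

# Hypothetical self.model for Example #2 (assumed): one nonlinear hidden
# layer followed by a linear output that returns logits.
def model(self, X, w_h, b_h, w_o, b_o):
    h = tf.nn.sigmoid(melt.matmul(X, w_h) + b_h)  # hidden layer; X may be sparse
    return tf.matmul(h, w_o) + b_o                # output logits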
Example #3
    def forward(self, trainer, FLAGS, numClass):
        w_h1 = melt.init_weights([trainer.num_features, FLAGS.hidden_size])  # create symbolic variables
        w_h2 = melt.init_weights([FLAGS.hidden_size, FLAGS.hidden_size])  # create symbolic variables
        w_o = melt.init_weights([FLAGS.hidden_size, numClass])

        py_x = self.model(trainer.X, w_h1, w_h2, w_o)
        return py_x
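This variant stacks two hidden layers and widens the output to numClass logits; a plausible self.model for this signature (again an assumption, not shown in the source):

# Hypothetical self.model for Example #3 (assumed):
def model(self, X, w_h1, w_h2, w_o):
    h1 = tf.nn.sigmoid(melt.matmul(X, w_h1))  # first hidden layer
    h2 = tf.nn.sigmoid(tf.matmul(h1, w_h2))   # second hidden layer
    return tf.matmul(h2, w_o)                 # logits over numClass classes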
Example #4
    def forward(self, trainer, FLAGS, numClass, gpu):
        w_h = melt.init_weights([trainer.num_features, FLAGS.hidden_size])  # create symbolic variables
        b_h = melt.init_weights([FLAGS.hidden_size])  # create symbolic variables
        w_o = melt.init_weights([FLAGS.hidden_size, numClass])
        b_o = melt.init_weights([numClass])

        py_x = self.model(trainer.X, w_h, b_h, w_o, b_o, gpu)
        return py_x
Example #5
    def forward(self, trainer):
        opts = self.options
        init_width = 0.5 / opts.emb_dim
        vocab_size = trainer.num_features

        emb = tf.Variable(
            tf.random_uniform(
                [vocab_size, opts.emb_dim], -init_width, init_width),
            name="emb")

        w_o = melt.init_weights([opts.emb_dim, 1], name='w_o')  # create symbolic variables
        b_o = melt.init_bias([1], name='b_o')

        text_emb = tf.nn.embedding_lookup_sparse(emb, trainer.sp_ids, sp_weights=None, name='text_emb')

        #return tf.matmul(self.activation(text_emb), w_o) + b_o
        return tf.matmul(text_emb, w_o) + b_o
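Here tf.nn.embedding_lookup_sparse gathers the embedding rows for each example's feature ids and, since sp_weights is None, combines them with the default "mean" combiner, so text_emb holds the average embedding per example. A toy illustration with made-up ids (assuming emb from above):

# Toy sp_ids: example 0 holds feature ids 3 and 7, example 1 holds id 2.
sp_ids = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                         values=tf.constant([3, 7, 2], dtype=tf.int64),
                         shape=[2, 2])
text_emb = tf.nn.embedding_lookup_sparse(emb, sp_ids, sp_weights=None)
# text_emb has shape [2, emb_dim]; row i is the mean of example i's embeddings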
Example #6
    def forward(self, trainer):
        w = melt.init_weights([trainer.num_features, 1], name='w')
        b = melt.init_bias([1], name='b')
        py_x = self.model(trainer.X, w, b)
        return py_x
Example #7
    def forward(self, trainer):
        w = melt.init_weights([trainer.num_features, 1])
        py_x = self.model(trainer.X, w)
        return py_x
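The standalone script below wires the same forward pass into a complete logistic-regression run. It calls melt.matmul, which presumably dispatches on whether the feature matrix is sparse or dense; a minimal sketch of such a wrapper (an assumption, not melt's actual source):

import tensorflow as tf

# Hypothetical melt.matmul (assumed behavior): a single entry point that
# works for both dense feature matrices and SparseTensor inputs.
def matmul(X, w):
    if isinstance(X, tf.SparseTensor):
        return tf.sparse_tensor_dense_matmul(X, w)  # sparse features
    return tf.matmul(X, w)                          # dense features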
assert (trainset.num_features == testset.num_features)
num_features = trainset.num_features
print 'num_features: ', num_features
print 'trainSet size: ', trainset.num_instances()
print 'testSet size: ', testset.num_instances()
print 'batch_size:', batch_size, ' learning_rate:', learning_rate, ' num_epochs:', num_epochs

trainer = melt.gen_binary_classification_trainer(trainset)


#---------------- logistic regression
def model(X, w):
    return melt.matmul(X, w)


w = melt.init_weights([num_features, 1])
py_x = model(trainer.X, w)

cost = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(py_x, trainer.Y))
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    cost)  # construct optimizer
predict_op = tf.nn.sigmoid(py_x)

sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)

teX, teY = testset.full_batch()
num_train_instances = trainset.num_instances()
for i in range(num_epochs):
    predicts, cost_ = sess.run([predict_op, cost],
                               feed_dict=trainer.gen_feed_dict(teX, teY))
    print i, 'auc:', roc_auc_score(teY, predicts), 'cost:', cost_
print "finish loading test set ", testset_file

assert(trainset.num_features == testset.num_features)
num_features = trainset.num_features
print 'num_features: ', num_features
print 'trainSet size: ', trainset.num_instances()
print 'testSet size: ', testset.num_instances()
print 'batch_size:', batch_size, ' learning_rate:', learning_rate, ' num_epochs:', num_epochs


trainer = melt.gen_binary_classification_trainer(trainset)

#---------------- logistic regression
def model(X, w):
		return melt.matmul(X,w)
w = melt.init_weights([num_features, 1]) 
py_x = model(trainer.X, w)

cost = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(py_x, trainer.Y))
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # construct optimizer
predict_op = tf.nn.sigmoid(py_x)

sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)

teX, teY = testset.full_batch()
num_train_instances = trainset.num_instances()
for i in range(num_epochs):
	predicts, cost_ = sess.run([predict_op, cost], feed_dict = trainer.gen_feed_dict(teX, teY))
	print i, 'auc:', roc_auc_score(teY, predicts), 'cost:', cost_
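Note that the loop above only evaluates on the held-out test set; the gradient-update pass is elided in the snippet. Assuming trainset exposes full_batch() the same way testset does (an assumption for illustration), the missing step would look like:

# Hypothetical training pass (assumed: trainset.full_batch() mirrors
# testset.full_batch(); the original snippet omits the update step).
trX, trY = trainset.full_batch()
for i in range(num_epochs):
    sess.run(train_op, feed_dict=trainer.gen_feed_dict(trX, trY))  # one gradient step
    predicts, cost_ = sess.run([predict_op, cost],
                               feed_dict=trainer.gen_feed_dict(teX, teY))
    print i, 'auc:', roc_auc_score(teY, predicts), 'cost:', cost_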