Example #1
0
# Command-line arguments (parsing is currently disabled; hyperparameters
# are hard-coded below instead).
args = sys.argv

'''if len(args) < 3:
	print('Usage: <Lwitem> <Lwuser> <Lvitem> <Lvuser> <L> <stepsize> <batchsize> <dataset folder>')
	sys.exit()
 
Lwitem = args[1]
Lwuser = args[2]
Lvitem = args[3]
Lvuser = args[4]
L = args[5]
stepsize = args[6]
batchsize = args[7]
dataset_folder = args[8]'''

# Load the dataset from the 'mat/' folder via the project-local `input` module.
data = input.read_data_sets('mat/')
# Hidden-layer widths for the item and user encoders.  L's role is not
# visible in this chunk — presumably a downstream latent dimension; verify.
Litem = 15
Luser = 15
L = 1

# TF1 graph inputs: rows are examples; both dimensions are fed at run time.
xuser = tf.placeholder("float",[None, None],name='user')
xitem = tf.placeholder("float",[None, None],name='item')

# Item-side parameters: one weight row per tf-idf vocabulary term.
witem = tf.Variable(tf.truncated_normal([data.train.item_tfidf_vectors.shape[1],Litem], stddev=.1),name='Witem')
b1item = tf.Variable(tf.constant(.1,shape=[Litem]),name='B1item')

# User-side parameters: one weight row per user (one-hot input).
wuser = tf.Variable(tf.truncated_normal([data.train.useronehots.shape[1],Luser], stddev=.1),name='Wuser')
b1user = tf.Variable(tf.constant(.1,shape=[Luser]),name='B1user')

# One sigmoid layer per side produces the latent representations.
hitem = tf.sigmoid(tf.matmul(xitem,witem)+b1item) #numexamples X numwords mult numwords X Litem = numexamples X Litem
huser = tf.sigmoid(tf.matmul(xuser,wuser)+b1user) #numexamples X numusers mult numusers X Luser = numexamples X Luser
Example #2
0
def run(trainFile, trainLabelFile, testFile,testLabelFile, groupFile, suspFile,loss, featureNum, nodeNum):
    """Train a single-hidden-layer MLP and dump per-instance suspiciousness scores.

    Args:
        trainFile, trainLabelFile, testFile, testLabelFile, groupFile:
            dataset file paths forwarded to ``input.read_data_sets``.
        suspFile: output path prefix; scores are written to
            ``<suspFile>-<epoch>`` every ``dump_step`` epochs.
        loss: loss-function identifier forwarded to ``ut.loss_func``.
        featureNum: number of input features.
        nodeNum: number of units in the hidden layer.

    Relies on module-level hyperparameters: L2_value, learning_rate,
    training_epochs, batch_size, dropout_rate, display_step, dump_step.
    """
    tf.reset_default_graph()
    # Network Parameters
    n_classes = 2  # total output classes (0 or 1)
    n_input = featureNum  # total number of input features
    n_hidden_1 = nodeNum  # 1st layer number of nodes

    # tf Graph input
    x = tf.placeholder("float", [None, n_input])
    y = tf.placeholder("float", [None, n_classes])
    g = tf.placeholder(tf.int32, [None, 1])

    # dropout parameter
    keep_prob = tf.placeholder(tf.float32)

    # Store layers weight & bias
    weights = {
        'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
        'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]))
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_hidden_1])),
        'out': tf.Variable(tf.random_normal([n_classes]))
    }

    # Construct model
    pred = multilayer_perceptron(x, weights, biases, keep_prob)

    datasets = input.read_data_sets(trainFile, trainLabelFile, testFile, testLabelFile, groupFile)

    # Define loss and optimizer.
    # L2 penalty over the two weight matrices only (biases excluded).
    regularizer = (tf.nn.l2_loss(weights['h1']) + tf.nn.l2_loss(weights['out'])) * L2_value
    cost = ut.loss_func(pred, y, loss, datasets, g)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost + regularizer)

    # Initializing the variables
    init = tf.global_variables_initializer()

    # Launch the graph; cap GPU memory so several jobs can share one device.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)

        # Training cycle
        for epoch in range(training_epochs):
            avg_cost = 0.
            total_batch = int(datasets.train.num_instances / batch_size)
            # Loop over all batches
            for i in range(total_batch):
                batch_x, batch_y, batch_g = datasets.train.next_batch(batch_size)
                # Run optimization op (backprop) and cost op (to get loss value)
                _, c = sess.run([optimizer, cost],
                                feed_dict={x: batch_x, y: batch_y, g: batch_g,
                                           keep_prob: dropout_rate})
                # Compute average loss
                avg_cost += c / total_batch
            # Display logs per epoch step
            if epoch % display_step == 0:
                print("Epoch:", '%04d' % (epoch + 1), "cost=",
                      "{:.9f}".format(avg_cost))
            if epoch % dump_step == (dump_step - 1):
                # Write the probability of class 0 (suspiciousness) for every
                # test instance, one value per line.
                res = sess.run(tf.nn.softmax(pred),
                               feed_dict={x: datasets.test.instances,
                                          y: datasets.test.labels, keep_prob: 1.0})
                with open(suspFile + '-' + str(epoch + 1), 'w') as f:
                    for susp in res[:, 0]:
                        f.write(str(susp) + '\n')

        print("Optimization Finished!")
Example #3
0
def run(trainFile, trainLabelFile, testFile, testLabelFile, groupFile, suspFile, featureDistribution, loss):
    """Train a bidirectional RNN and dump per-instance suspiciousness scores.

    Args:
        trainFile, trainLabelFile, testFile, testLabelFile, groupFile:
            dataset file paths forwarded to ``input.read_data_sets``.
        suspFile: output path prefix; scores are written to
            ``<suspFile>-<epoch>`` every ``dump_step`` epochs.
        featureDistribution: per-step feature counts; its length gives the
            number of RNN steps and its maximum the padded feature width.
        loss: loss-function identifier forwarded to ``ut.loss_func``.

    Relies on module-level hyperparameters: L2_value, learning_rate,
    training_epochs, batch_size, dropout_rate, display_step, dump_step.
    """
    # reset graph
    tf.reset_default_graph()
    # Network Parameters
    n_input = numpy.array(featureDistribution).max()
    n_steps = len(featureDistribution)
    n_hidden = numpy.array(featureDistribution).max()
    n_classes = 2  # number of output classes

    # tf Graph input
    x = tf.placeholder("float", [None, n_steps, n_input])
    y = tf.placeholder("float", [None, n_classes])
    g = tf.placeholder(tf.int32, [None, 1])

    # dropout
    keep_prob = tf.placeholder(tf.float32)

    # Define weights
    weights = {
        # Hidden layer weights => 2*n_hidden because of forward + backward cells
        'out': tf.Variable(tf.random_normal([2 * n_hidden, n_classes]))
    }
    biases = {
        'out': tf.Variable(tf.random_normal([n_classes]))
    }

    pred = BiRNN(x, weights, biases, n_hidden, n_steps, keep_prob)

    # load datasets
    datasets = input.read_data_sets(trainFile, trainLabelFile, testFile, testLabelFile, groupFile)

    # Load test data: pad variable-width rows to the max feature count, then
    # reshape to [examples, steps, features] for the RNN.
    test_data = myrnn.fillMatrix(datasets.test.instances, featureDistribution)
    test_data = test_data.reshape((-1, n_steps, n_input))
    test_label = datasets.test.labels

    # Define loss and optimizer.
    # L2-regularize every trainable non-bias variable.
    variables = tf.trainable_variables()
    regularizer = tf.add_n([tf.nn.l2_loss(v) for v in variables if 'bias' not in v.name]) * L2_value
    cost = ut.loss_func(pred, y, loss, datasets, g)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost + regularizer)

    init = tf.global_variables_initializer()

    # Launch the graph; cap GPU memory so several jobs can share one device.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        total_batch = int(datasets.train.num_instances / batch_size)
        for epoch in range(training_epochs):
            avg_cost = 0.
            # Loop over all batches
            for i in range(total_batch):
                batch_x, batch_y, batch_g = datasets.train.next_batch(batch_size)
                # Pad and reshape the batch the same way as the test data.
                batch_x = myrnn.fillMatrix(batch_x, featureDistribution)
                batch_x = batch_x.reshape((batch_size, n_steps, n_input))
                # Run optimization op (backprop)
                _, c = sess.run([optimizer, cost],
                                feed_dict={x: batch_x, y: batch_y, g: batch_g,
                                           keep_prob: dropout_rate})
                # Compute average loss
                avg_cost += c / total_batch
            # BUGFIX: the original gated this print on the stale inner-loop
            # variable `i` (NameError when total_batch == 0, and always true
            # otherwise); guard on total_batch explicitly instead.
            if epoch % display_step == 0 and total_batch > 0:
                print("Epoch " + str(epoch+1) + ", cost = " + "{:.6f}".format(avg_cost))
            if epoch % dump_step == (dump_step - 1):
                # Write the probability of class 0 (suspiciousness) for every
                # test instance, one value per line.
                res = sess.run(tf.nn.softmax(pred),
                               feed_dict={x: test_data, y: test_label, keep_prob: 1.0})
                with open(suspFile + '-' + str(epoch + 1), 'w') as f:
                    for susp in res[:, 0]:
                        f.write(str(susp) + '\n')
        print("Optimization Finished!")
def run(trainFile, trainLabelFile, testFile, testLabelFile, groupFile,
        suspFile, loss, featureNum, nodeNum):
    """Train the mutation/spectrum feature model and dump suspiciousness scores.

    Args:
        trainFile, trainLabelFile, testFile, testLabelFile, groupFile:
            dataset file paths forwarded to ``input.read_data_sets``.
        suspFile: output path prefix; scores are written to
            ``<suspFile>-<epoch>`` every ``dump_step`` epochs.
        loss: loss-function identifier forwarded to ``ut.loss_func``.
        featureNum, nodeNum: accepted for signature compatibility with the
            other ``run`` variants; this graph uses fixed feature slices.

    Relies on module-level hyperparameters: learning_rate, training_epochs,
    batch_size, dropout_rate, display_step, dump_step.
    """
    tf.reset_default_graph()
    # Network Parameters
    n_classes = 2  #  total output classes (0 or 1)
    train_writer = tf.summary.FileWriter("./log", graph=tf.get_default_graph())
    # tf Graph input.  Column widths correspond to the fixed feature layout
    # sliced out of each instance row below (34 spectrum, 4x35 mutation,
    # 37 complexity, 15 similarity).
    # NOTE(review): `x` and `mutation` are never fed or consumed; kept so the
    # serialized graph is unchanged — candidates for removal.
    x = tf.placeholder("float", [None, 226])
    spec = tf.placeholder("float", [None, 34])
    mutation1 = tf.placeholder("float", [None, 35])
    mutation2 = tf.placeholder("float", [None, 35])
    mutation3 = tf.placeholder("float", [None, 35])
    mutation4 = tf.placeholder("float", [None, 35])
    mutation = tf.placeholder("float", [None, 140])
    complexity = tf.placeholder("float", [None, 37])
    similarity = tf.placeholder("float", [None, 15])
    y = tf.placeholder("float", [None, n_classes])
    g = tf.placeholder(tf.int32, [None, 1])
    is_training = tf.placeholder(tf.bool, name='is_training')

    # dropout parameter
    keep_prob = tf.placeholder(tf.float32)

    # Construct model
    pred = mutation_spec_first(spec, mutation1, mutation2, mutation3,
                               mutation4, complexity, similarity, keep_prob,
                               is_training)
    datasets = input.read_data_sets(trainFile, trainLabelFile, testFile,
                                    testLabelFile, groupFile)

    # Define loss and optimizer.
    # BUGFIX: regu_losses is a *list* of tensors; the original added the list
    # directly to `cost`, which TF converts to a tensor and broadcasts
    # element-wise instead of summing the penalties (and fails when the
    # collection is empty).  Sum explicitly, defaulting to 0.
    regu_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    total_regu = tf.add_n(regu_losses) if regu_losses else tf.constant(0.0)
    # Labels are inputs, not trainables; block any gradient through them.
    y = tf.stop_gradient(y)
    cost = ut.loss_func(pred, y, loss, datasets, g)
    # Run batch-norm style update ops before each optimizer step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    summary_op = tf.summary.merge_all()
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.AdamOptimizer(
            learning_rate=learning_rate).minimize(cost + total_regu)

    # Initializing the variables
    init = tf.global_variables_initializer()

    # Launch the graph; cap GPU memory so several jobs can share one device.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)

        # Training cycle
        for epoch in range(training_epochs):
            avg_cost = 0.
            total_batch = int(datasets.train.num_instances / batch_size)
            # Loop over all batches
            for i in range(total_batch):
                batch_x, batch_y, batch_g = datasets.train.next_batch(
                    batch_size)
                # Run optimization op (backprop) and cost op (to get loss value)
                _, c, regu_loss = sess.run(
                    [optimizer, cost, regu_losses],
                    feed_dict={
                        spec: batch_x[:, :34],
                        mutation1: batch_x[:, 34:69],
                        mutation2: batch_x[:, 69:104],
                        mutation3: batch_x[:, 104:139],
                        mutation4: batch_x[:, 139:174],
                        complexity: batch_x[:, 174:211],
                        similarity: batch_x[:, -15:],
                        y: batch_y,
                        g: batch_g,
                        keep_prob: dropout_rate,
                        is_training: True
                    })
                # Compute average loss
                avg_cost += c / total_batch
            # Display logs per epoch step
            if epoch % display_step == 0:
                print("Epoch:", '%04d' % (epoch+1), "cost=",
                      "{:.9f}".format(avg_cost), ", l2 loss= ", numpy.sum(regu_loss))

            if epoch % dump_step == (dump_step - 1):
                # Write the probability of class 0 (suspiciousness) for every
                # test instance, one value per line, and log summaries.
                res, step_summary = sess.run(
                    [tf.nn.softmax(pred), summary_op],
                    feed_dict={
                        spec: datasets.test.instances[:, :34],
                        mutation1: datasets.test.instances[:, 34:69],
                        mutation2: datasets.test.instances[:, 69:104],
                        mutation3: datasets.test.instances[:, 104:139],
                        mutation4: datasets.test.instances[:, 139:174],
                        complexity: datasets.test.instances[:, 174:211],
                        similarity: datasets.test.instances[:, -15:],
                        y: datasets.test.labels,
                        keep_prob: 1.0,
                        is_training: False
                    })
                train_writer.add_summary(step_summary)
                with open(suspFile + '-' + str(epoch + 1), 'w') as f:
                    for susp in res[:, 0]:
                        f.write(str(susp) + '\n')
Example #5
0
import sys

from sklearn.metrics import classification_report

import input

sys.path.append('../../')
from CNN import *

# Reading mnist Data :
######################
mnist = input.read_data_sets('MNIST_data', one_hot=True)

# Training :
############
# NHWC image tensor expected by the CNN: (examples, 28, 28, 1 channel).
x_train = np.reshape(mnist.train.images, [-1, 28, 28, 1])

# converting mnist correct labels 1 hot vectors into data into ids of correct labels
y_train = mnist.train.labels
# Each row is one-hot, so the index of the (single) 1 is the class id.
y_train = [np.where(i == 1)[0][0] for i in y_train]

# Distinct class ids present in the training labels (0..9 for MNIST).
classes = np.unique(y_train)
cnn = CNN(input_shape=[28, 28, 1], classes=classes, conv_shape=[5, 5])

cnn.fit(x_train, y_train)

# Testing :
###########
x_test = np.reshape(mnist.test.images, [-1, 28, 28, 1])
y_pred = cnn.predict(x_test)
Example #6
0
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
import input

# Load bug-report train/test splits via the project-local `input` module.
bugrpt = input.read_data_sets('train0.txt', 'test0.txt')

# Flag; not consumed in this visible chunk — presumably gates checkpoint
# restoration further down; verify against the rest of the script.
restore = True

# Hyperparameters.
learn_rate = 0.002
batch_size = 100

num_input = 5000  # features per time step — presumably vocabulary size; verify
num_step = 100  # sequence length
gru_size = 500  # GRU hidden-state width
num_classes = bugrpt.train.name.shape[1]  # one output column per class label

# TF1 graph inputs.
inputs = tf.placeholder("float", [None, num_step, num_input])
y = tf.placeholder("float", [None, num_classes])
# Actual (unpadded) length of each input sequence.
input_lens = tf.placeholder("int32", [
    None,
])
keep_prob = tf.placeholder(tf.float32)

# Output projection.  The 2*gru_size input width suggests concatenated
# forward/backward GRU states (bidirectional) — confirm against the RNN code.
W = tf.Variable(tf.random_uniform([2 * gru_size, num_classes], -1.0, 1.0),
                name="W")
Example #7
0
# Project-local MNIST loader (classic TF-tutorial interface).
from input import read_data_sets
mnist = read_data_sets("MNIST_data/", one_hot=True)

import tensorflow as tf

# Softmax-regression graph: 784-pixel input -> 10 class probabilities.
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder("float", [None, 10])  # one-hot ground-truth labels

# NOTE(review): tf.log(y) yields -inf/NaN when a predicted probability hits 0;
# the numerically stable form is tf.nn.softmax_cross_entropy_with_logits.
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

# Accuracy: fraction of examples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
init = tf.initialize_all_variables()  # deprecated alias of global_variables_initializer

sess = tf.Session()
sess.run(init)

# Train for 1000 mini-batches of 100; evaluating on the full test set every
# iteration is slow but keeps the example simple.
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    print('acc %s' % sess.run(accuracy,
                              feed_dict={
                                  x: mnist.test.images,
                                  y_: mnist.test.labels
                              }))
    print('         loss %s' %
          sess.run(cross_entropy, feed_dict={