Example #1
import codecs

def test(modelPath, testDataPath):
    testData = []
    with codecs.open(testDataPath, "r", "UTF-8") as f:
        for line in f:
            line = line.strip()
            # Each line is tab-separated: input text, then a label.
            # The label is ignored at test time.
            text, label = line.split("\t")
            testData.append(text)

    lm = NeuralNetworkLanguageModel()
    print lm.predict(testData, modelPath)
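
Both test() above and train() in the next example read the same file layout: one example per line, with the input text and its integer label separated by a single tab. A hypothetical two-line sample (contents invented purely for illustration):

    this line would be labeled one	1
    this line would be labeled zero	0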
Example #2
import codecs
import numpy as np

def train(modelPath, trainingDataPath):
    trainingData = []
    labels = []
    with codecs.open(trainingDataPath, "r", "UTF-8") as f:
        for line in f:
            line = line.strip()
            # Each line is tab-separated: input text, then an integer label.
            text, label = line.split("\t")
            trainingData.append(text)
            labels.append(int(label))

    labels = np.array(labels)
    lm = NeuralNetworkLanguageModel()
    lm.train(trainingData, labels, savePath=modelPath)
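
A minimal sketch of the two helpers used together; the file and model paths are hypothetical:

    train("model", "training.tsv")  # fit on the labeled data, save weights under "model"
    test("model", "test.tsv")       # reload the weights and print predictions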
Example #3
File: server.py  Project: jntkym/rappers
def __init__(self):
    self.lm = NeuralNetworkLanguageModel()
    self.output = None          # inference op; built in runServer()
    self.requestMax = 50        # maximum queued connections
    self.port = 12345
    self.host = "0.0.0.0"       # listen on all interfaces
Example #4
File: server.py  Project: jntkym/rappers
import socket
import tensorflow as tf


class NeuralNetworkLanguageModelServer:
    def __init__(self):
        self.lm = NeuralNetworkLanguageModel()
        self.output = None          # inference op; built in runServer()
        self.requestMax = 50        # maximum queued connections
        self.port = 12345
        self.host = "0.0.0.0"       # listen on all interfaces

    def predict(self, data, sess):
        if self.output is None:
            raise ValueError("Model is not loaded yet!")

        # Vectorize the input lines and run them through the inference op.
        vectors = self.lm.getLongVectors(data)
        output = sess.run(self.output, feed_dict={self.lm.input_placeholder: vectors})
        # Take the most probable class for each input.
        result = sess.run(tf.argmax(output, 1))
        return result

    def _runServer(self, sess):
        # create a TCP socket and bind it to the configured host and port
        serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        serversocket.bind((self.host, self.port))

        # queue up to self.requestMax pending connections
        serversocket.listen(self.requestMax)

        while True:
            # establish a connection
            clientsocket, addr = serversocket.accept()
            print "Got a connection from %s" % str(addr)
            data = ""
            while True:
                chunk = clientsocket.recv(1024)
                if not chunk:
                    break  # client closed the connection early
                data += chunk
                if data.endswith("<END>"):
                    break

            # Decode once the full request has arrived, so multi-byte UTF-8
            # characters split across recv() chunks are handled correctly.
            data = data.decode("utf-8")

            # The request is newline-separated; drop the trailing "<END>" marker.
            lines = data.split("\n")[0:-1]
            result = map(str, self.predict(lines, sess).tolist())
            # sendall() retries until the whole reply has been written.
            clientsocket.sendall(" ".join(result))
            clientsocket.close()

    def runServer(self):
        # Cap TensorFlow's per-process GPU allocation: on a 12GB card,
        # a fraction of 0.25 reserves roughly 3GB.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)

        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            with tf.device(self.lm.device):
                # Build the inference op, then restore the trained weights from disk.
                self.output = self.lm.inference(self.lm.input_placeholder)
                saver = tf.train.Saver()
                saver.restore(sess, "model")
                print "Model restored."
                self._runServer(sess)
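
The server speaks a small ad-hoc protocol: the client sends UTF-8 text, one input per line, followed by the literal <END> marker, and gets the predicted labels back as a single space-separated string. A minimal client sketch under those assumptions (the query() helper, host, port, and inputs are hypothetical; a single recv() is assumed to be enough for the short reply):

import socket

def query(lines, host="localhost", port=12345):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    # One input per line, then the end-of-request marker.
    payload = u"\n".join(lines) + u"\n<END>"
    s.sendall(payload.encode("utf-8"))
    reply = s.recv(4096)  # e.g. "1 0" -- one label per input
    s.close()
    return reply.split()

print query([u"first input line", u"second input line"])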