Example No. 1
 def packet_handler(self, packet, address):
     packetid = ord(packet[0])
     if packetid < 100:
         processing.get_logger().warning('internal packet unhandled: "%s"' % self.transport.packet_type_info(packetid))
         return self
     p = self.packet_types[packetid]().from_string(packet)
     p.sender = address
     self.queue_message(p)
     return self
Example No. 2
def main(argv=None):
    logger = process.get_logger()
    logger.info("Training started.")
    train_data = process.get_train_dataset()
    test_data = process.get_test_dataset()
    train(train_data, test_data)
    logger.info("Training ended.")
Example No. 3
def evaluate():
    logger = process.get_logger()

    with tf.Graph().as_default() as g:

        x = tf.placeholder(
            tf.int32,
            [None, None],
            name="x-input"
        )
        
        y = inference.inference(x, process.get_lex_len(), None)

        tf.argmax(y, 1)  # class-prediction op; the raw logits y are what gets printed below

        saver = tf.train.Saver()
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(
                const.MODEL_DIR)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path\
                                    .split("/")[-1].split("-")[-1]
                feed_x = process.get_vec_from_text(const.PREDICT_TEXT)
                # feed_x = feed_x.any()
                y = sess.run(y, feed_dict={x: feed_x})
                print("After %s training step(s), text: '%s', "
                        "prediction: %s" % (global_step, const.PREDICT_TEXT, str(y)))
                logger.info("After %s training step(s), text: '%s', "
                    "prediction = %s" % (global_step, const.PREDICT_TEXT, str(y)))
            else:
                print("No checkpoint file found")
                return
Example No. 4
 def add_process_group(self, name, default_actor_class=None, max_tick_time=None, interval=.03):
     '''adds a process group to the pool'''
     assert self.is_main_process, 'Pysage currently only supports spawning child groups from the Main Group'
     self.validate_groups_mode()
     self._ipc_listen()
     # make sure we have a str
     g = str(name)
     if g in self.groups:
         raise GroupAlreadyExists('Group name "%s" already exists.' % g)
     server_addr = self.ipc_transport.address
     # shared should quit switch
     switch = processing.Value('B', 0)
     actor_class = default_actor_class or DefaultActor
     p = processing.Process(target=_subprocess_main, name=name, args=(name, actor_class, max_tick_time, interval, server_addr, switch, self.packet_types))
     p.start()
     processing.get_logger().info('started group "%s" in process "%s"' % (name, processing.get_pid(p)))
     _clientid = self.ipc_transport.accept()
     self.groups[g] = (p, _clientid, switch)
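A hypothetical call site, for illustration only (the manager object below is an assumption, standing in for whatever instance exposes add_process_group):

# illustrative only; "physics" is an arbitrary group name
manager.add_process_group('physics', max_tick_time=0.1)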
Example No. 5
def main(argv=None):
    test_data = process.get_test_dataset()
    test_xs, test_ys, _ = process.get_next_batch(test_data, 0, len(test_data))

    global_step, acc_value = evaluate(test_xs, test_ys)
    print("Global step: #%d, accuracy: %g" % (global_step, acc_value))
    logger = process.get_logger()
    logger.info("Global step: #%d, accuracy: %g" % (global_step, acc_value))
    return 0
Example No. 6
def main(argv=None):
    logger = process.get_logger()
    logger.info("Training (CNN) started")
    # logger.info("Training step: %g", const.TRAINING_STEPS)
    logger.info("Learning rate: %g", const.LEARNING_RATE_BASE)

    train()
    logger.info("Training (CNN) ended")

    return 0
Example No. 7
def inference(input_tensor, input_size,
              is_train):  # input_size here is too large
    if is_train is not None:
        dropout_keep_prob = 0.5
        logger = process.get_logger()
        logger.debug("dropout: %g" % dropout_keep_prob)
    else:
        dropout_keep_prob = 1.0
    # embedding layer
    with tf.device('/cpu:0'), tf.name_scope("embedding"):
        embedding_size = 128
        W = tf.Variable(tf.random_uniform([input_size, embedding_size],
                                          minval=-1.0,
                                          maxval=1.0),
                        name="embedding")
        embedded_chars = tf.nn.embedding_lookup(W, input_tensor)
        # embedded_chars = tf.nn.dropout(embedded_chars, dropout_keep_prob)
        embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)
    # convolution + maxpool layer
    num_filters = 128
    filter_sizes = [3, 4, 5]
    pooled_outputs = []
    for i, filter_size in enumerate(filter_sizes):
        with tf.name_scope("conv-maxpool-%s" % filter_size):
            filter_shape = [filter_size, embedding_size, 1, num_filters]
            W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1))
            b = tf.Variable(tf.constant(0.1, shape=[num_filters]))
            conv = tf.nn.conv2d(embedded_chars_expanded,
                                W,
                                strides=[1, 1, 1, 1],
                                padding="VALID")
            h = tf.nn.relu(tf.nn.bias_add(conv, b))
            pooled = tf.nn.max_pool(
                h,
                ksize=[1, input_size - filter_size + 1, 1, 1],
                strides=[1, 1, 1, 1],
                padding='VALID')
            pooled_outputs.append(pooled)

    num_filters_total = num_filters * len(filter_sizes)
    h_pool = tf.concat(pooled_outputs, 3)
    h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
    # dropout
    with tf.name_scope("dropout"):
        h_drop = tf.nn.dropout(h_pool_flat, dropout_keep_prob)
    # output
    with tf.name_scope("output"):
        W = tf.get_variable("W",
                            shape=[num_filters_total, num_classes],
                            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.Variable(tf.constant(0.1, shape=[num_classes]))
        output = tf.nn.xw_plus_b(h_drop, W, b)

    return output
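A minimal usage sketch, mirroring how the other examples in this listing call the function (passing None for is_train keeps the dropout probability at 1.0; process.get_lex_len() supplies the vocabulary size):

x = tf.placeholder(tf.int32, [None, None], name="x-input")
logits = inference(x, process.get_lex_len(), None)  # is_train=None -> dropout_keep_prob = 1.0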
Example No. 8
def evaluate():
    logger = process.get_logger()
    
    with tf.Graph().as_default() as g:

        xs, ys = process.get_test_dataset()


        x = tf.placeholder(
            tf.int32,
            [None, None],
            name="x-input"
        )
        y_ = tf.placeholder(
            tf.float32, [None, inference.OUTPUT_NODE], name="y_-input")

        validate_feed = {x: xs,
                         y_: ys}

        y = inference.inference(x, process.get_lex_len(), None)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # variable_averages = tf.train.ExponentialMovingAverage(
        #     mnist_train.MOVING_AVERAGE_DECAY)
        # variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver()  # variables_to_restore


        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(
                const.MODEL_DIR)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path\
                                    .split("/")[-1].split("-")[-1]
                accuracy_score = sess.run(accuracy,
                                            feed_dict=validate_feed)
                print("After %s training step(s), validation "
                        "accuracy = %g" % (global_step, accuracy_score))
                logger.info("After %s training step(s), validation "
                    "accuracy = %g" % (global_step, accuracy_score))
            else:
                print("No checkpoint file found")
                return
    return accuracy_score
Example No. 9
def train_neural_network():
    Train = process.Data_Process()
    input_size = Train.max_name_length
    voc_len = Train.voc_len

    x = tf.placeholder(tf.int32, [None, input_size], "input-x")
    y_ = tf.placeholder(tf.float32, [None, const.NUM_CLASSES], "input-y_")
    dropout_keep_prob = tf.placeholder(tf.float32)
    global_step = tf.Variable(0, False)

    y = model.model(x, input_size, voc_len, dropout_keep_prob)

    optimizer = tf.train.AdamOptimizer(1e-3)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
    grads_and_vars = optimizer.compute_gradients(loss)
    train_op = optimizer.apply_gradients(grads_and_vars, global_step)

    saver = tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(const.MODEL_DIR)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        # for i in range(201):
        while Train.cur_epoch < const.EPOCH:
            batch_xs, batch_ys = Train.get_train_batch()
            _, loss_ = sess.run([train_op, loss],
                                feed_dict={
                                    x: batch_xs,
                                    y_: batch_ys,
                                    dropout_keep_prob: 0.5
                                })
            print(global_step.eval(), loss_)
            logger = process.get_logger()
            logger.info("Global_step: %d, loss: %g", global_step.eval(), loss_)
            # save the model
            if global_step.eval() % 50 == 0:
                saver.save(sess,
                           os.path.join(const.MODEL_DIR, const.MODEL_NAME),
                           global_step=global_step)
Example No. 10
def train():
    logger = process.get_logger()
    x = tf.placeholder(tf.int32, [None, None], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, inference.OUTPUT_NODE],
                        name="y_-input")
    is_train = tf.placeholder("bool")

    # the best accuracy seen so far, used when deciding whether to save a checkpoint
    pre_acc = tf.Variable(0.0, False, name="pre_accuracy")

    # regularizer = tf.contrib.layers.l2_regularizer(const.REGULARIZATION_RATE)
    y = inference.inference(x, process.get_lex_len(), is_train)

    global_step = tf.Variable(0, trainable=False, name="global_step")

    # variable_averages = tf.train.ExponentialMovingAverage(
    #     MOVING_AVERAGE_DECAY, global_step)
    # variable_averages_op = variable_averages.apply(
    #     tf.trainable_variables())

    # cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    #     logits=y, labels=tf.argmax(y_, 1))
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=y,
                                                            labels=y_)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean  #+ tf.add_n(tf.get_collection("losses"))

    # learning_rate = tf.train.exponential_decay(
    #     LEARNING_RATE_BASE,
    #     global_step,
    #     mnist.train.num_examples / BATCH_SIZE,
    #     LEARNING_RATE_DECAY)
    learning_rate = const.LEARNING_RATE_BASE
    # train_step = tf.train.GradientDescentOptimizer(learning_rate)\
    optimizer = tf.train.AdamOptimizer(learning_rate)
    grads_and_vars = optimizer.compute_gradients(loss)
    train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
    # with tf.control_dependencies([train_step, variable_averages_op]):
    #     train_op = tf.no_op(name="train")

    # for evaluation
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        ckpt = tf.train.get_checkpoint_state(const.MODEL_DIR)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        # writer = tf.summary.FileWriter(const.TENSORBOARD_LOG_DIR, tf.get_default_graph())

        # for evaluation
        test_xs, test_ys = process.get_test_dataset()

        is_first_round = True
        # for i in range(const.TRAINING_STEPS):
        while True:
            xs, ys = process.get_next_batch(40)
            # _, loss_value, step = sess.run([train_step, loss, global_step],
            #                                feed_dict={x: xs, y_: ys, is_train: True})
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: xs,
                                               y_: ys,
                                               is_train: True
                                           })

            if is_first_round:
                cur_acc = sess.run(accuracy, {
                    x: test_xs[0:50],
                    y_: test_ys[0:50],
                    is_train: False
                })  # evaluating too large a slice at once exhausts memory
                logger.info(
                    "After the first train, %s training step(s), validation "
                    "accuracy = %g" % (step, cur_acc))
                logger.info("While pre_accuracy is: %g" % pre_acc.eval())
                is_first_round = False

            # if i % 1000 == 0:
            # if i % 50 == 0:
            # if step % 50 == 0:
            if step % 2 == 0:
                print("After %d training step(s), loss on training "
                      "batch is %g." % (step, loss_value))
                logger.info("After %d training step(s), loss on training "
                            "batch is %g." % (step, loss_value))

                # for evaluation
                # current_acc
                cur_acc, test_loss = sess.run((accuracy, loss), {
                    x: test_xs[0:50],
                    y_: test_ys[0:50],
                    is_train: False
                })
                logger.info("After %s training step(s), validation "
                            "accuracy = %g, loss = %g" %
                            (step, cur_acc, test_loss))
                logger.info("While pre_accuracy is: %g" % pre_acc.eval())
                if cur_acc >= pre_acc.eval():
                    saver.save(sess,
                               os.path.join(const.MODEL_DIR, const.MODEL_NAME),
                               global_step=global_step)
                    sess.run(tf.assign(pre_acc, cur_acc))
                if cur_acc > 0.8:
                    break
Example No. 11
def main(argv=None):
    logger = process.get_logger()
    logger.info("Predict started")
    evaluate()
    logger.info("Predict ended")
Example No. 12
import os
import datetime
import process as processing

try:
    import pyraknet
except ImportError:
    RAKNET_AVAILABLE = False
else:
    RAKNET_AVAILABLE = True


class NotConnectedException(Exception):
    pass


logger = processing.get_logger()


class Transport(object):
    '''an interface that all transports must implement'''
    def connect(self, host, port):
        '''connects to a server implementing the same interface'''
        pass

    def disconnect(self):
        '''disconnects all clients and itself'''
        pass

    def listen(self, host, port, connection_handler):
        '''listens for connections, and calls connection_handler upon new connections'''
        pass
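A concrete transport is expected to subclass this interface and fill in the three methods. The stub below is only an illustrative sketch; the class name and its log-only behaviour are assumptions, not part of the module:

class LoggingOnlyTransport(Transport):
    '''illustrative stub: logs each call instead of doing real networking'''
    def connect(self, host, port):
        logger.info('connect requested for %s:%s' % (host, port))

    def disconnect(self):
        logger.info('disconnect requested')

    def listen(self, host, port, connection_handler):
        logger.info('listen requested on %s:%s' % (host, port))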
Example No. 13
 def handle_message(self, msg):
     processing.get_logger().info('Default actor received message "%s"' % msg)
     return False
Example No. 14
# Messaging.py
# Credits: 
#   this module builds on the concepts of a trigger system introduced in
#   the book: "Game Coding Complete - 2nd Edition" by Mike McShaffry
# this module implements a message manager along with message receivers
import collections
import time
import util
import process as processing
import inspect

logger = processing.get_logger()

WildCardMessageType = '*'

def MessageID():
    '''generates unique message IDs per runtime'''
    i = 0
    while True:
        yield i
        i += 1
messageID = MessageID()

class InvalidMessageProperty(Exception):
    pass

class MessageReceiver(object):
    '''generic message receiver class that game object inherits from'''
    # message types this message receiver will subscribe to
    subscriptions = []
    def __init__(self):
Example No. 15
def get_logger():
    return processing.get_logger()
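Call sites elsewhere in this listing obtain the logger through this wrapper and log through it; a minimal sketch:

logger = get_logger()
logger.info("Training started.")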
Example No. 16
def train(train_data, test_data):
    # x = tf.placeholder(tf.float32, [None, const.IMAGE_HEIGHT * const.IMAGE_WIDTH], "x_input")
    x = tf.placeholder(
        tf.float32,
        [None, const.IMAGE_HEIGHT, const.IMAGE_WIDTH, const.NUM_CHANNELS],
        "x-input")
    y_ = tf.placeholder(tf.float32,
                        [None, const.MAX_CAPTCHA * const.CHAR_SET_LEN],
                        "y_-input")
    global_step = tf.Variable(0, False)
    # keep_prob = tf.placeholder(tf.float32) # dropout
    regularizer = tf.contrib.layers.l2_regularizer(const.REGULARIZATION_RATE)
    # TODO: there seems to be a problem here
    # y = inference.inference(x, True, regularizer)
    y = inference.inference(x, True, None)

    # loss
    # before the TensorFlow upgrade this argument was called targets; since 1.3 it is labels
    # cross_entropy_mean = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1)))
    cross_entropy_mean = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=y, labels=y_))
    # what is the difference between softmax and sigmoid in the final classification layer?
    # answer: https://github.com/zhengwh/captcha-tensorflow

    loss = cross_entropy_mean  #+ tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        const.LEARNING_RATE_BASE, global_step,
        const.TRAINING_STEPS / const.BATCH_SIZE, const.DECAY_LEARNING_RATE)
    # optimizer: to speed up training, the learning rate should start large and then decay gradually
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss, global_step)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(
        loss, global_step)

    # == end ==

    predict = tf.reshape(y, [-1, const.MAX_CAPTCHA, const.CHAR_SET_LEN])

    # argmax returns the index of the maximum value along the specified dimension
    # the final output is a 4*26 tensor, so argmax yields 4 indices, which are compared with the correct result
    max_idx_p = tf.argmax(predict, 2)
    max_idx_l = tf.argmax(
        tf.reshape(y_, [-1, const.MAX_CAPTCHA, const.CHAR_SET_LEN]), 2)

    # tf.equal compares the predictions with the actual results:
    correct_pred = tf.equal(max_idx_p, max_idx_l)

    # this returns a list of booleans; to see which predictions are correct, cast the booleans to floats:
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # global train_data
    # TensorFlow provides the Saver class for saving the model and restoring its variables.
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(const.MODEL_DIR)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        logger = process.get_logger()

        cursor = 0
        for i in range(const.TRAINING_STEPS):
            xs, ys, cursor = process.get_next_batch(train_data, cursor)
            sess.run(optimizer, {x: xs, y_: ys})
            if i % 50 == 0:
                saver.save(sess, os.path.join(const.MODEL_DIR,
                                              const.MODEL_NAME))

                loss_value, cross_entropy_mean_value = sess.run(
                    (loss, cross_entropy_mean), {
                        x: xs,
                        y_: ys
                    })

                # test_xs, test_ys, _ = process.get_next_batch(test_data, 0, len(test_data))

                # # acc_value = sess.run(accuracy, {x: test_xs, y_: test_ys})
                # acc_value, global_step_value = evalidate.evalidate(test_xs, test_ys)

                # logger.info("Global step: #%d, loss: %g, accuracy: %g" % (global_step.eval(), loss_value, acc_value))
                # logger.info("Global step: #%d, loss: %g, accuracy: %g" % (global_step_value, loss_value, acc_value))
                logger.info(
                    "Global step: #%d, loss: %g, cross_entropy_mean: %g" %
                    (global_step.eval(), loss_value, cross_entropy_mean_value))

        saver.save(sess, os.path.join(const.MODEL_DIR, const.MODEL_NAME),
                   global_step)

        loss_value, cross_entropy_mean_value = sess.run(
            (loss, cross_entropy_mean), {
                x: xs,
                y_: ys
            })

        # test_xs, test_ys, _ = process.get_next_batch(test_data, 0, len(test_data))

        # # acc_value = sess.run(accuracy, {x: test_xs, y_: test_ys})
        # acc_value, global_step_value = evalidate.evalidate(test_xs, test_ys)

        # logger.info("Global step: #%d (final step), loss: %g, accuracy: %g" % (global_step.eval(), loss_value, acc_value))
        logger.info(
            "Global step: #%d (final step), loss: %g, cross_entropy_mean: %g" %
            (global_step.eval(), loss_value, cross_entropy_mean_value))
Example No. 17
def main(argv=None):
    logger = process.get_logger()
    logger.info("Evaluation started")
    evaluate()
    logger.info("Evaluation ended")
Example No. 18
 def log(self, level, msg):
     '''process aware logging'''
     return processing.get_logger().log(level, msg)
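A hedged usage sketch, assuming level is a standard library logging level such as logging.INFO and that the owning object is available as actor (both names are illustrative):

import logging

actor.log(logging.INFO, 'handled tick message')  # delegates to processing.get_logger().log(level, msg)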