Example #1
# Assumes `import json` and `import numpy as np`; this is a method on a
# WebSocket handler class, and reg()/infer() are defined elsewhere.
def on_message(self, message):
    # The client sends each message as a JSON string; parse it here.
    msg = json.loads(message)
    if msg["type"] == "reg":
        # Note: concurrent registrations can fail here.
        self.reg()
    elif msg["type"] == "data":
        # "imagedata" is a comma-separated string of RGBA byte values
        # for a 28x28 canvas (28 * 28 * 4 = 3136 values).
        image_arr = msg["imagedata"].split(",")
        rgba_img = np.array(image_arr).reshape(28, 28, 4).astype('uint8')
        # The canvas RGB channels are all zero; the drawn stroke lives in
        # the alpha channel, so keep only channel 3 as a (1, 28, 28, 1) batch.
        rgba_img = rgba_img[:, :, 3].reshape(1, 28, 28, 1)
        infer(rgba_img)
        # (An earlier, commented-out path converted the RGB channels to
        # grayscale with PIL / a luminance dot product before inference.)
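For context, here is a minimal sketch of the message this handler expects, assuming the client flattens a 28x28 RGBA canvas into a comma-separated string; the field names "type" and "imagedata" come from the handler above, everything else is illustrative:

import json
import numpy as np

# Build a fake "data" message the way a browser client would:
# 28*28 RGBA pixels joined into one comma-separated string.
pixels = np.zeros((28, 28, 4), dtype=np.uint8)
pixels[10:18, 10:18, 3] = 255   # a small square drawn in the alpha channel
message = json.dumps({
    "type": "data",
    "imagedata": ",".join(str(v) for v in pixels.reshape(-1)),
})

# The handler-side preprocessing then recovers a (1, 28, 28, 1) batch:
msg = json.loads(message)
arr = np.array(msg["imagedata"].split(",")).reshape(28, 28, 4).astype("uint8")
batch = arr[:, :, 3].reshape(1, 28, 28, 1)
print(batch.shape)   # (1, 28, 28, 1)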
Example #2
def train_definer():
    with tf.variable_scope("input"):
        x = tf.placeholder(dtype=tf.float32, shape=[None, 784], name="input-x")
        y = tf.placeholder(dtype=tf.int64, shape=[None], name="input-y")
    regularizer = tf.contrib.layers.l2_regularizer(REGULATIZATION_RATE)
    logits = mnist_infer.infer(x, regularizer)
    # global_step is stepped by the optimizer and must not be trainable.
    global_step = tf.get_variable("global_step",
                                  shape=[],
                                  dtype=tf.int32,
                                  initializer=tf.zeros_initializer(),
                                  trainable=False)
    with tf.variable_scope("ema"):
        ema = tf.train.ExponentialMovingAverage(decay=EMA_DECAY,
                                                num_updates=global_step)
        ema_op = ema.apply(tf.trainable_variables())
    with tf.variable_scope("loss"):
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(tf.argmax(logits, axis=1), y), tf.float32))
        cross_entropy = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                           logits=logits))
        loss = cross_entropy + tf.add_n(tf.get_collection("loss"))
    with tf.variable_scope("optimizer"):
        learning_rate = tf.train.exponential_decay(
            learning_rate=learning_rate_base,
            global_step=global_step,
            decay_steps=decay_steps,
            decay_rate=learning_rate_decay,
            staircase=True)
        train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
            loss, global_step=global_step)
        # Run the gradient step and the EMA update together as one op.
        with tf.control_dependencies([train_op, ema_op]):
            agg = tf.no_op(name="agg")
    return x, y, logits, agg, accuracy, loss, global_step
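A minimal sketch of driving the graph returned by train_definer() in a session; next_batch(), BATCH_SIZE, and STEPS are hypothetical stand-ins here, not part of the original code:

x, y, logits, agg, accuracy, loss, global_step = train_definer()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(STEPS):
        # xs: (batch, 784) float32 pixels, ys: (batch,) integer labels,
        # matching the placeholders defined in train_definer().
        xs, ys = next_batch(BATCH_SIZE)   # hypothetical batching helper
        _, l, acc, step = sess.run([agg, loss, accuracy, global_step],
                                   feed_dict={x: xs, y: ys})
        if step % 100 == 0:
            print("step %d, loss %f, acc %f" % (step, l, acc))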
Example #3
File: mnist_train.py  Project: toddyan/tf
def train(x_train, y_train):
    x = tf.placeholder(tf.float32,
                       shape=(None, mnist_infer.N[0]),
                       name="input-x")
    y = tf.placeholder(tf.float32,
                       shape=(None, mnist_infer.N[-1]),
                       name="input-y")
    regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)
    z = mnist_infer.infer(x, regularizer)
    global_step = tf.Variable(0, trainable=False)
    ema = tf.train.ExponentialMovingAverage(moving_average_decay, global_step)
    ema_updater = ema.apply(tf.trainable_variables())
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=z, labels=y))
    loss = cross_entropy + tf.add_n(tf.get_collection("loss"))
    learning_rate = tf.train.exponential_decay(learning_rate_base, global_step,
                                               x_train.shape[0] / batch_size,
                                               learning_rate_decay)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([ema_updater, optimizer]):
        trainer = tf.no_op(name="trainer")
    saver = tf.train.Saver()
    with tf.Session() as s:
        tf.global_variables_initializer().run()
        for epoch in range(training_epochs):
            start = 0
            while start < x_train.shape[0]:
                end = min(x_train.shape[0], start + batch_size)
                xs = x_train[start:end]
                ys = y_train[start:end]
                _, loss_value, step = s.run([trainer, loss, global_step],
                                            feed_dict={
                                                x: xs,
                                                y: ys
                                            })
                start = end
            z_train = s.run(z, feed_dict={x: x_train})
            acc = (z_train.argmax(axis=1) == y_train.argmax(axis=1)).mean()
            print("epoch[%d] loss=%f acc=%f" % (epoch, loss_value, acc))
            saver.save(s,
                       os.path.join(model_savepath, model_name),
                       global_step=epoch)
    writer = tf.summary.FileWriter(log_savepath, tf.get_default_graph())
    writer.close()
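The loop above tracks exponential moving averages of the weights but never evaluates with them. Below is a minimal sketch of evaluating with the shadow variables restored from the latest checkpoint, assuming the same module-level constants (moving_average_decay, model_savepath) and that mnist_infer.infer() accepts None for the regularizer:

def evaluate(x_test, y_test):
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32,
                           shape=(None, mnist_infer.N[0]),
                           name="input-x")
        z = mnist_infer.infer(x, None)   # no regularization at eval time
        ema = tf.train.ExponentialMovingAverage(moving_average_decay)
        # Map each variable to its shadow (averaged) value on restore.
        saver = tf.train.Saver(ema.variables_to_restore())
        with tf.Session() as s:
            ckpt = tf.train.latest_checkpoint(model_savepath)
            saver.restore(s, ckpt)
            z_test = s.run(z, feed_dict={x: x_test})
            return (z_test.argmax(axis=1) == y_test.argmax(axis=1)).mean()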
Example #4
def train(mnist_data):
    x = tf.placeholder(tf.float32, [None, mnist_infer.input_layer_size],
                       name='x')
    y_ = tf.placeholder(tf.float32, [None, mnist_infer.output_layer_size],
                        name='y_')

    regularizer = tf.contrib.layers.l2_regularizer(regularizer_lambda)
    y = mnist_infer.infer(x, regularizer)

    step = tf.Variable(0, dtype=tf.int32, trainable=False)
    variable_average = tf.train.ExponentialMovingAverage(
        moving_average_decay, step)
    moving_average_op = variable_average.apply(tf.trainable_variables())

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_mean = tf.reduce_mean(cross_entropy)
    loss = cross_mean + tf.add_n(tf.get_collection("losses"))

    lr = tf.train.exponential_decay(learning_rate_base, step,
                                    mnist_data.train.num_examples / batch_size,
                                    learning_rate_decay)
    train_step = tf.train.GradientDescentOptimizer(lr).minimize(
        loss, global_step=step)

    with tf.control_dependencies([train_step, moving_average_op]):
        train_op = tf.no_op(name="train")

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(train_step_num):
            xs, ys = mnist_data.train.next_batch(batch_size)
            #print ("lh_debug", xs, ys)
            _, iloss, istep = sess.run([train_op, loss, step],
                                       feed_dict={
                                           x: xs,
                                           y_: ys
                                       })
            print("lh_debug:: step:", sess.run(step))