Example no. 1
def main():
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Pair each training image with its label and build a batch generator
        zipped_data = zip(train_data, train_label)
        batches = data_helper.gen_batch(list(zipped_data), batch_size,
                                        max_steps)

        for i in range(max_steps):

            batch = next(batches)
            images_batch, labels_batch = zip(*batch)
            feed_dict = {
                images_placeholder: images_batch,
                labels_placeholder: labels_batch
            }

            # Periodically print out the model's current accuracy
            if i % 100 == 0:
                train_accuracy = sess.run(accuracy, feed_dict=feed_dict)
                print('Step {:d}, training accuracy {:g}'.format(
                    i, train_accuracy))

            # Run one optimization step on the current batch
            sess.run([train_step, loss], feed_dict=feed_dict)

        test_accuracy = sess.run(accuracy,
                                 feed_dict={
                                     images_placeholder: test_data,
                                     labels_placeholder: test_label
                                 })
        print('Test accuracy {:g}'.format(test_accuracy))
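This and several of the later snippets depend on a data_helper.gen_batch helper that is not shown on this page. One plausible implementation, sketched here purely as an assumption about what the helper does (a generator that cycles through the zipped data in fixed-size slices, reshuffling after each pass):

import numpy as np

def gen_batch(data, batch_size, num_iter):
    # Hypothetical reconstruction of the helper used above
    data = np.array(data, dtype=object)
    index = len(data)  # force a shuffle on the first iteration
    for _ in range(num_iter):
        index += batch_size
        if index + batch_size > len(data):
            index = 0
            np.random.shuffle(data)  # reshuffle at each epoch boundary
        yield data[index:index + batch_size]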
Example no. 2
def main():
    labelholder = tf.placeholder(tf.int64, shape=[None], name='labels')
    imageholder = tf.placeholder(tf.float32, shape=[None, 3072], name='images')
    layers = construct(imageholder, 3072, 10, 100)

    with tf.name_scope('En'):
        L = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=layers, labels=labelholder, name='cross_entropy'))
        tf.summary.scalar('en', L)

    currstep = tf.train.GradientDescentOptimizer(0.001).minimize(
        L, global_step=tf.Variable(0, name='global_step', trainable=False))

    # Build the accuracy op once, outside the training loop; creating it
    # inside the loop (as the original did) adds new graph nodes on every
    # evaluation and steadily bloats the graph
    with tf.name_scope('a'):
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(tf.argmax(layers, 1), labelholder), tf.float32))
        tf.summary.scalar('ta', accuracy)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Load the data set once and reuse it, rather than calling
        # data_helper.load_data() for every key
        data_sets = data_helper.load_data()
        zdata = zip(data_sets['images_train'], data_sets['labels_train'])
        batches = data_helper.gen_batch(list(zdata), 400, totalstep)

        for i in range(totalstep):

            batch = next(batches)
            images_batch, labels_batch = zip(*batch)

            if i % 100 == 0:
                print('training accuracy for {:d} step is {:g}'.format(
                    i,
                    sess.run(accuracy,
                             feed_dict={
                                 imageholder: images_batch,
                                 labelholder: labels_batch
                             })))

            sess.run([currstep, L],
                     feed_dict={
                         imageholder: images_batch,
                         labelholder: labels_batch
                     })

        print('Accuracy is ' + format(
            sess.run(accuracy,
                     feed_dict={
                         imageholder: data_sets['images_test'],
                         labelholder: data_sets['labels_test']
                     })))
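construct is not defined on this page either; judging from the call construct(imageholder, 3072, 10, 100), it likely maps 3072 input features to 10 class logits through a hidden layer of 100 units. A minimal sketch under that assumption (layer shapes and initialization are guesses, not the original code):

def construct(images, input_dim, num_classes, hidden_units):
    # Hypothetical two-layer network producing unscaled logits
    w1 = tf.Variable(
        tf.truncated_normal([input_dim, hidden_units], stddev=0.05))
    b1 = tf.Variable(tf.zeros([hidden_units]))
    hidden = tf.nn.relu(tf.matmul(images, w1) + b1)

    w2 = tf.Variable(
        tf.truncated_normal([hidden_units, num_classes], stddev=0.05))
    b2 = tf.Variable(tf.zeros([num_classes]))
    return tf.matmul(hidden, w2) + b2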
Example no. 3
    def train(self, x, y):
        if not os.path.exists(FLAGS.model_path):
            os.makedirs(FLAGS.model_path)
        with tf.device(FLAGS.device_name):
            train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(
                loss=self.loss, name='optimize')
        saver = tf.train.Saver()
        init_op = tf.global_variables_initializer()
        with tf.Session() as sess:
            print("Initializing model...")
            sess.run(init_op)
            start_epoch = 0
            checkpoint = tf.train.latest_checkpoint(FLAGS.model_path)
            if checkpoint:
                saver.restore(sess, checkpoint)
                print("Successfully restored model {0}".format(checkpoint))
                # Checkpoint paths end in '-<epoch>' because the model is
                # saved with global_step=epoch, so resume from there
                start_epoch += int(checkpoint.split('-')[-1])
            print("Starting training...")
            total_loss = 0
            try:
                for epoch in range(start_epoch, self.epoches):
                    n_chunk = len(x) // self.batch_size
                    # Average loss of the previous epoch (0 on the first one)
                    ave_loss = total_loss / n_chunk
                    total_loss = 0
                    for batch in range(n_chunk):
                        x_batch, y_batch = gen_batch(x, y, self.batch_size,
                                                     batch)
                        feed = {self.inputs: x_batch, self.targets: y_batch}
                        l, acc, _ = sess.run(
                            [self.loss, self.accuracy, train_op],
                            feed_dict=feed)
                        total_loss += l

                        if batch % self.every_epoch_to_print == 0:
                            print(
                                'Epoch:%d, last epoch loss ave:%.5f  batch:%d, current epoch loss:%.5f, acc:%.3f'
                                % (epoch, ave_loss, batch, l, acc))
                    if epoch % self.every_epoch_to_save == 0:
                        print("Saving model...")
                        saver.save(sess,
                                   os.path.join(FLAGS.model_path,
                                                FLAGS.model_prefix),
                                   global_step=epoch)
            except KeyboardInterrupt:
                print("Training interrupted manually, saving model...")
                saver.save(sess,
                           os.path.join(FLAGS.model_path, FLAGS.model_prefix),
                           global_step=epoch)
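This snippet calls a different gen_batch(x, y, batch_size, batch) that returns the batch-th slice directly instead of acting as a generator. Its definition is not shown; a minimal sketch under that assumption:

def gen_batch(x, y, batch_size, batch_index):
    # Hypothetical reconstruction: return the batch_index-th slice of x and y
    start = batch_index * batch_size
    return x[start:start + batch_size], y[start:start + batch_size]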
Example no. 4

# Define saver to save model state at checkpoints
saver = tf.train.Saver()

# -----------------------------------------------------------------------------
# Run the TensorFlow graph
# -----------------------------------------------------------------------------

with tf.Session() as sess:
    # Initialize variables and create summary-writer
    sess.run(tf.global_variables_initializer())
    summary_writer = tf.summary.FileWriter(logdir, sess.graph)

    # Generate input data batches
    zipped_data = zip(data_sets['images_train'], data_sets['labels_train'])
    batches = data_helper.gen_batch(list(zipped_data), FLAGS.batch_size,
                                    FLAGS.max_steps)

    for i in range(FLAGS.max_steps):

        # Get next input data batch
        batch = next(batches)
        images_batch, labels_batch = zip(*batch)
        feed_dict = {
            images_placeholder: images_batch,
            labels_placeholder: labels_batch
        }

        # Periodically print out the model's current accuracy
        if i % 100 == 0:
            train_accuracy = sess.run(accuracy, feed_dict=feed_dict)
            print('Step {:d}, training accuracy {:g}'.format(
                i, train_accuracy))
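The visible portion of this snippet creates summary_writer but never writes to it. Assuming summary ops are defined elsewhere in the graph (and a train_step op as in the other examples), the usual TF 1.x pattern would be to merge and emit them inside the loop, roughly:

merged = tf.summary.merge_all()

# inside the training loop:
summary, _ = sess.run([merged, train_step], feed_dict=feed_dict)
summary_writer.add_summary(summary, global_step=i)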
Example no. 5
                      l2_reg_lambda=FLAGS.L2_reg_lambda)

        with tf.device('/gpu:0'):
            train_step = tf.train.GradientDescentOptimizer(
                learning_rate).minimize(loss=cnn.loss, global_step=global_step)
            # train_step = tf.train.AdamOptimizer(1e-3).minimize(loss=cnn.loss, global_step=global_step)

    saver = tf.train.Saver()
    # Restore from the latest checkpoint if one exists, otherwise start fresh
    # (this check assumes model_save_path ends with a path separator)
    if os.path.exists(FLAGS.model_save_path + 'checkpoint'):
        saver.restore(sess, tf.train.latest_checkpoint(FLAGS.model_save_path))
        # NB: reassigning learning_rate here does not affect the already-built
        # train_step op; a tf.Variable or placeholder would be needed for that
        learning_rate = tf.constant(0.001, dtype=tf.float32)
    else:
        sess.run(tf.global_variables_initializer())
    last = datetime.datetime.now()
    for i in range(FLAGS.training_ite):
        x_batch, y_batch = data_helper.gen_batch(x_train, y_train, i,
                                                 FLAGS.batch_size)

        feed_dic = {
            cnn.input_x: x_batch,
            cnn.input_y: y_batch,
            cnn.dropout_keep_prob: FLAGS.dropout
        }
        _, loss, acc = sess.run([train_step, cnn.loss, cnn.accuracy],
                                feed_dict=feed_dic)
        if (i % 100) == 0:
            now = datetime.datetime.now()
            print('loss:{},acc:{}---time:{}'.format(loss, acc, now - last))
            last = now
        if (i % 1000) == 0:
            feed_dic = {
                cnn.input_x: x_dev,
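data_helper.gen_batch here takes the iteration counter followed by the batch size, so it presumably indexes into the training set and wraps around at the end. The helper is not shown; a minimal sketch under that assumption:

def gen_batch(x, y, iteration, batch_size):
    # Hypothetical reconstruction: slice for this iteration, wrapping around
    # once the end of the training data is reached
    n_batches = len(x) // batch_size
    start = (iteration % n_batches) * batch_size
    return x[start:start + batch_size], y[start:start + batch_size]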
Example no. 6
# Weight decay: L2 penalty over all trainable variables
Weight_decay = 0.0001
l2 = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])

train_step = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(
    cross_entropy + l2 * Weight_decay, global_step=global_step)

# Prediction accuracy
correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Start the session and initialize variables
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

zipped_data = zip(data_sets['images_train'], data_sets['labels_train'])
batches = data_helper.gen_batch(list(zipped_data), 128, 64124)

zipped_data_test = zip(data_sets['images_test'], data_sets['labels_test'])
batches_test = data_helper.gen_batch(list(zipped_data_test), 128, 64124)

epoch_num = 0
max_train_accuracy = 0
max_test_accuracy = 0
min_loss = 99999
for i in range(64124):  # 391 iterations per epoch * 164 epochs
    batch = next(batches)
    images_batch, labels_batch = zip(*batch)
    batch_test = next(batches_test)
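Since tf.nn.l2_loss(v) computes sum(v ** 2) / 2, the penalty added to cross_entropy above is Weight_decay / 2 times the squared L2 norm of every trainable variable, biases included. A common refinement, shown here as an optional variant rather than part of the original snippet, is to decay only the weight matrices:

l2 = tf.add_n([
    tf.nn.l2_loss(var) for var in tf.trainable_variables()
    if 'bias' not in var.name.lower()  # assumes bias variables have 'bias' in their names
])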