Example 1
def train():
    with tf.name_scope('input'):
        # train_image_batch, train_labels_batch = input.read_cifar10(TRAIN_PATH, batch_size=BATCH_SIZE)
        train_image_batch, train_labels_batch = input.read_and_decode_by_tfrecorder(TRAIN_PATH, BATCH_SIZE)
        test_image_batch, test_labels_batch = input.read_and_decode_by_tfrecorder(TEST_PATH, BATCH_SIZE)
        print(train_image_batch)
        print(train_labels_batch)
        # show = cv2.imshow('test',train_image_batch[0])
        # wait = cv2.waitKeyEx()

        #logits = alex_net.alex_net(train_image_batch, NUM_CLASS)
        # logits = fcn_net.fcn_net(train_image_batch,NUM_CLASS)
        logits = cifar_net.inference(train_image_batch, batch_size=BATCH_SIZE, n_classes=NUM_CLASS, name="train")
        # logits = VGG.VGG16N(train_image_batch,n_classes=NUM_CLASS,is_pretrain=False)
        #logits = mnistnet.net(train_image_batch,num_class=NUM_CLASS)
        print(logits)
        loss = function.loss(logits=logits, labels=train_labels_batch)
        accuracy_logits = cifar_net.inference(test_image_batch, batch_size=BATCH_SIZE, n_classes=NUM_CLASS, name="test")
        accuracy = function.accuracy(logits=accuracy_logits, labels=test_labels_batch)

        my_global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = function.optimize(loss=loss, learning_rate=LEARNING_RATE, global_step=my_global_step)

        saver = tf.train.Saver(tf.global_variables())
        summary_op = tf.summary.merge_all()

        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)

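        # Launch the queue-runner threads that feed the input queues created by
        # read_and_decode_by_tfrecorder; without them, sess.run() on the batch
        # tensors would block forever.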
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):

            if coord.should_stop():
                break

            # 'accuracy' was built on the test batch, so this is test-set accuracy.
            _, train_loss, test_accuracy = sess.run([train_op, loss, accuracy])
            #print('***** Step: %d, loss: %.4f *****' % (step, train_loss))
            if (step % 50 == 0) or (step == MAX_STEP - 1):
                print('***** Step: %d, loss: %.4f *****' % (step, train_loss))
                summary_str = sess.run(summary_op)
                tra_summary_writer.add_summary(summary_str, step)
            if (step % 200 == 0) or (step == MAX_STEP - 1):
                print('***** Step: %d, loss: %.4f, test accuracy: %.4f%% *****'
                      % (step, train_loss, test_accuracy))
                summary_str = sess.run(summary_op)
                tra_summary_writer.add_summary(summary_str, step)
            if step % 2000 == 0 or step == MAX_STEP - 1:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- input queue exhausted')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
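Examples 1, 2, and 4 call helpers from a local function module (function.loss, function.accuracy, function.optimize) whose bodies are not shown in the listing. A minimal sketch of what they might look like for integer class labels, inferred from the call sites and the %% in the print formats; the bodies below are an assumption, not the source:

import tensorflow as tf

def loss(logits, labels):
    # Assumed: mean softmax cross-entropy against integer class labels.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)
    return tf.reduce_mean(cross_entropy, name='loss')

def accuracy(logits, labels):
    # Assumed: percentage of the batch whose top-1 prediction matches the
    # label; scaled to 0-100 to match the '%.4f%%' print format.
    correct = tf.nn.in_top_k(logits, labels, 1)
    return tf.reduce_mean(tf.cast(correct, tf.float32), name='accuracy') * 100.0

def optimize(loss, learning_rate, global_step):
    # Assumed: plain SGD, mirroring the explicit optimizer in Example 2.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    return optimizer.minimize(loss, global_step=global_step)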
Example 2
def train():
    with tf.name_scope('input'):
        # train_image_batch, train_labels_batch = input.read_cifar10(TRAIN_PATH, batch_size=BATCH_SIZE)
        train_image_batch, train_labels_batch = input.read_and_decode_by_tfrecorder(
            TRAIN_PATH, BATCH_SIZE)
        # test_image_batch, test_labels_batch = input.read_and_decode_by_tfrecorder(TEST_PATH, BATCH_SIZE)
        print(train_image_batch)
        print(train_labels_batch)

        logits = OCnet.inference(train_image_batch,
                                 batch_size=BATCH_SIZE,
                                 n_classes=NUM_CLASS,
                                 name="train")
        # logits = OCnet2.inference(train_image_batch, batch_size=BATCH_SIZE, num_class=NUM_CLASS)

        loss = function.loss(logits=logits, labels=train_labels_batch)
        accuracy_train = function.accuracy(logits=logits,
                                           labels=train_labels_batch)

        # The global step is a bookkeeping counter, not a weight: it must be
        # trainable=False, or the optimizer will try to update it as a parameter.
        my_global_step = tf.Variable(0, name='global_step', trainable=False)
        optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
        train_op = optimizer.minimize(loss, global_step=my_global_step)
        saver = tf.train.Saver(tf.global_variables())

        init = tf.global_variables_initializer()
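        # Cap this process at 70% of GPU memory so other jobs can share the card.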
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        sess.run(init)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):

            if coord.should_stop():
                break

            _, train_loss, train_accuracy = sess.run(
                [train_op, loss, accuracy_train])
            # print('***** Step: %d, loss: %.4f *****' % (step, train_loss))
            if (step % 50 == 0) or (step == MAX_STEP - 1):
                print('***** Step: %d, loss: %.4f *****' % (step, train_loss))
            if (step % 200 == 0) or (step == MAX_STEP - 1):
                print(
                    '***** Step: %d, loss: %.4f, train accuracy: %.4f%% *****'
                    % (step, train_loss, train_accuracy))
            if step % 2000 == 0 or step == MAX_STEP - 1:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- input queue exhausted')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
Example 3
def train():
    with tf.Graph().as_default():
        global_step = tf.train.get_or_create_global_step()

        with tf.device('/cpu:0'):
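            # Build the input pipeline on the CPU so preprocessing does not
            # occupy the GPU.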
            images, labels = function.get_inputs(eval_data=False)
            test_images, test_labels = function.get_inputs(eval_data=True)

        logits = function.inference(images)
        loss = function.loss(logits, labels)
        train_op = function.train_op(loss, global_step)

        test_logits = function.test(test_images)
        correct_prediction = tf.equal(tf.argmax(test_logits, 1),
                                      tf.argmax(test_labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        class _LoggerHook(tf.train.SessionRunHook):
            """Logs loss and runtime."""
            def begin(self):
                self._step = -1
                self._start_time = time.time()

            def before_run(self, run_context):
                self._step += 1
                return tf.train.SessionRunArgs(loss)  # Asks for loss value.

            def after_run(self, run_context, run_values):
                if self._step % config.FLAGS.log_frequency == 0:
                    current_time = time.time()
                    duration = current_time - self._start_time
                    self._start_time = current_time

                    loss_value = run_values.results
                    examples_per_sec = config.FLAGS.log_frequency * config.FLAGS.batch_size / duration
                    sec_per_batch = float(duration /
                                          config.FLAGS.log_frequency)

                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), self._step, loss_value,
                                        examples_per_sec, sec_per_batch))

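        # MonitoredTrainingSession handles variable initialization, checkpointing,
        # and hook dispatch; should_stop() turns True once StopAtStepHook fires.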
        with tf.train.MonitoredTrainingSession(hooks=[
                tf.train.StopAtStepHook(last_step=config.FLAGS.max_steps),
                tf.train.NanTensorHook(loss),
                _LoggerHook()
        ], ) as mon_sess:
            while not mon_sess.should_stop():
                print("acc: %s" % mon_sess.run(accuracy))
                mon_sess.run(train_op)
Example 4
def train():
    image_batch, label_batch = input.read_and_decode_by_tfrecorder(
        TRAIN_PATH, TRAIN_BATCH_SIZE, True)
    logits = ENnet.inference(image_batch, TRAIN_BATCH_SIZE, NUME_CLASS)
    loss = function.loss(logits, label_batch)
    accuracy = function.accuracy(logits, label_batch)
    optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
    global_step = tf.Variable(0, name="global_step", trainable=False)
    train_op = optimizer.minimize(loss, global_step=global_step)
    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess, coord)

        try:
            for step in range(MAX_STEP + 1):
                if coord.should_stop():
                    print("coord is stop")
                    break
                acc, _, losses = sess.run([accuracy, train_op, loss])
                if step % 100 == 0:
                    print(
                        "step : %d ,loss: %.4f , accuracy on trainSet: %.4f%%"
                        % (step, losses, acc))
                if step % 10000 == 0 or step == MAX_STEP:
                    checkpoint_dir = os.path.join(RESULT_PATH, "model.ckpt")
                    saver.save(sess, checkpoint_dir, global_step=step)
        except tf.errors.OutOfRangeError:
            print("error")
        finally:
            coord.request_stop()
        coord.join(threads)
Example 5
    # Define the network for each GPU
    for i in range(num_gpus):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('Tower_%d' % i) as scope:
                with tf.variable_scope(tf.get_variable_scope()):
                    # grab this portion of the input
                    next_batch = all_input[i * batch_size:(i + 1) *
                                           batch_size, :]

                    # Construct the model
                    z_x_mean, z_x_log_sigma_sq, z_x, x_tilde, l_x_tilde, x_p, d_x, l_x, d_x_p, z_p, d_x_tilde = inference(
                        next_batch)

                    # Calculate the loss for this tower
                    SSE_loss, KL_loss, D_loss, G_loss, LL_loss = loss(
                        next_batch, x_tilde, z_x_log_sigma_sq, z_x_mean, d_x,
                        d_x_p, l_x, l_x_tilde, d_x_tilde, dim1, dim2, dim3)

                    # specify loss to parameters
                    params = tf.trainable_variables()
                    # Use 'v' rather than 'i': under Python 2 a list
                    # comprehension leaks its variable and would clobber the
                    # GPU index 'i' from the outer loop.
                    E_params = [v for v in params if 'enc' in v.name]
                    G_params = [v for v in params if 'gen' in v.name]
                    D_params = [v for v in params if 'dis' in v.name]

                    # Calculate the losses specific to encoder, generator, decoder
                    L_e = tf.clip_by_value(KL_loss * KL_param + LL_loss, -100,
                                           100)
                    L_g = tf.clip_by_value(
                        LL_loss * LL_param - D_loss * G_param, -100, 100)
                    L_d = tf.clip_by_value(D_loss, -100, 100)
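Example 5's snippet ends after the clipped per-network losses. In a multi-tower setup of this shape, each tower would typically compute gradients for its own parameter group next, then enable variable reuse for the following tower. A sketch of that continuation, assuming optimizers E_opt, G_opt, and D_opt created before the loop (hypothetical names; none of this appears in the source):

                    # Hypothetical continuation: per-tower gradients for each
                    # sub-network, collected for averaging across GPUs later.
                    E_grads = E_opt.compute_gradients(L_e, var_list=E_params)
                    G_grads = G_opt.compute_gradients(L_g, var_list=G_params)
                    D_grads = D_opt.compute_gradients(L_d, var_list=D_params)

                    # Let the next tower share this tower's variables.
                    tf.get_variable_scope().reuse_variables()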