Example #1
def evaluate(network):
    with tf.Graph().as_default() as g:
        eval_data = FLAGS.eval_data == 'test'

        if network == 1:
            images, labels = cifar10.inputs(eval_data=eval_data)
            logits, w1, b1, w2, b2 = cifar10.create_model(images)
        else:
            images, labels = cifar10_2.inputs(eval_data=eval_data)
            logits, w1, b1, w2, b2 = cifar10_2.create_model(images)

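        # in_top_k yields one bool per example: is the true label the top-1 prediction?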
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

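        # Restore the exponential-moving-average (shadow) copies of the variables;
        # evaluating with averaged weights typically scores slightly better than the raw values.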
        variable_averages = tf.train.ExponentialMovingAverage(
            cifar10.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        summary_op = tf.summary.merge_all()

        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

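        # Evaluate repeatedly at a fixed interval, or just once when run_once is set.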
        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Example #2
def train():
    with tf.Graph().as_default():
        global_step = tf.contrib.framework.get_or_create_global_step()
        images, labels = cifar10.inputs(False)
        logits, fc1_w, fc2_w, fc1_b, fc2_b = cifar10.create_model(images)
        loss = cifar10.loss(logits, labels)

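        # L2 weight decay on the fully connected weights and biases, scaled by 5e-4.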
        regularizers = (tf.nn.l2_loss(fc1_w) + tf.nn.l2_loss(fc1_b) +
                        tf.nn.l2_loss(fc2_w) + tf.nn.l2_loss(fc2_b))
        loss += 5e-4 * regularizers

        train_op = cifar10.train(loss, global_step)

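        # Session hook that logs loss and throughput every log_frequency steps.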
        class _LoggerHook(tf.train.SessionRunHook):
            def begin(self):
                self._step = -1
                self._start_time = time.time()

            def before_run(self, run_context):
                self._step += 1
                return tf.train.SessionRunArgs(loss)

            def after_run(self, run_context, run_values):
                if self._step % FLAGS.log_frequency == 0:
                    current_time = time.time()
                    duration = current_time - self._start_time
                    self._start_time = current_time

                    loss_value = run_values.results
                    examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
                    sec_per_batch = float(duration / FLAGS.log_frequency)

                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), self._step, loss_value,
                                        examples_per_sec, sec_per_batch))

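        # MonitoredTrainingSession handles checkpointing and summaries in
        # checkpoint_dir and runs the hooks (stop at max_steps, abort on NaN
        # loss, periodic logging); the loop body only runs the train op.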
        with tf.train.MonitoredTrainingSession(
                checkpoint_dir=FLAGS.train_dir,
                hooks=[
                    tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
                    tf.train.NanTensorHook(loss),
                    _LoggerHook()
                ],
                config=tf.ConfigProto(
                    log_device_placement=FLAGS.log_device_placement,
                    allow_soft_placement=True)) as mon_sess:

            while not mon_sess.should_stop():
                mon_sess.run(train_op)
Example #3
def main(args):
    debug = (args.debug == 'True')
    print(args)
    np.random.seed(args.seed)
    with tf.Graph().as_default():
        train_dataset, num_train_file = DateSet(args.file_list, args, debug)
        test_dataset, num_test_file = DateSet(args.test_list, args, debug)
        list_ops = {}

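        # Batch and repeat forever; one-shot iterators need no explicit initializer.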
        batch_train_dataset = train_dataset.batch(args.batch_size).repeat()
        train_iterator = batch_train_dataset.make_one_shot_iterator()
        train_next_element = train_iterator.get_next()

        batch_test_dataset = test_dataset.batch(args.batch_size).repeat()
        test_iterator = batch_test_dataset.make_one_shot_iterator()
        test_next_element = test_iterator.get_next()

        list_ops['num_train_file'] = num_train_file
        list_ops['num_test_file'] = num_test_file

        model_dir = args.model_dir

        print('Total number of examples: {}'.format(num_train_file))
        print('Test number of examples: {}'.format(num_test_file))
        print('Model dir: {}'.format(model_dir))

        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        list_ops['global_step'] = global_step
        list_ops['train_dataset'] = train_dataset
        list_ops['test_dataset'] = test_dataset
        list_ops['train_next_element'] = train_next_element
        list_ops['test_next_element'] = test_next_element

        epoch_size = num_train_file // args.batch_size
        print('Number of batches per epoch: {}'.format(epoch_size))

        image_batch = tf.placeholder(tf.float32,
                                     shape=(None, args.image_size,
                                            args.image_size, 3),
                                     name='image_batch')
        landmark_batch = tf.placeholder(tf.float32,
                                        shape=(None, 196),
                                        name='landmark_batch')

        list_ops['image_batch'] = image_batch
        list_ops['landmark_batch'] = landmark_batch

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        list_ops['phase_train_placeholder'] = phase_train_placeholder

        print('Building training graph.')

        landmarks_pre, landmarks_loss = create_model(image_batch,
                                                     landmark_batch,
                                                     phase_train_placeholder,
                                                     args)
        get_param_num()

        L2_loss = tf.add_n(tf.losses.get_regularization_losses())

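        # Per-sample sum of squared errors over all 196 landmark coordinates
        # (98 (x, y) points), then the batch mean, plus L2 weight decay.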
        loss_sum = tf.reduce_sum(tf.square(landmark_batch - landmarks_pre),
                                 axis=1)
        loss_sum = tf.reduce_mean(loss_sum)
        loss_sum += L2_loss

        train_op, lr_op = train_model(loss_sum, global_step, num_train_file,
                                      args)

        list_ops['landmarks'] = landmarks_pre
        list_ops['L2_loss'] = L2_loss
        list_ops['loss'] = loss_sum
        list_ops['train_op'] = train_op
        list_ops['lr_op'] = lr_op

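        # Non-trainable scalar variables used only as summary feeds; they are
        # assigned each epoch's metric values below so TensorBoard can plot them.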
        test_mean_error = tf.Variable(tf.constant(0.0),
                                      dtype=tf.float32,
                                      name='ME')
        test_failure_rate = tf.Variable(tf.constant(0.0),
                                        dtype=tf.float32,
                                        name='FR')
        test_10_loss = tf.Variable(tf.constant(0.0),
                                   dtype=tf.float32,
                                   name='TestLoss')
        train_loss = tf.Variable(tf.constant(0.0),
                                 dtype=tf.float32,
                                 name='TrainLoss')
        train_loss_l2 = tf.Variable(tf.constant(0.0),
                                    dtype=tf.float32,
                                    name='TrainLoss2')
        tf.summary.scalar('test_mean_error', test_mean_error)
        tf.summary.scalar('test_failure_rate', test_failure_rate)
        tf.summary.scalar('test_10_loss', test_10_loss)
        tf.summary.scalar('train_loss', train_loss)
        tf.summary.scalar('train_loss_l2', train_loss_l2)

        save_params = tf.trainable_variables()
        saver = tf.train.Saver(save_params, max_to_keep=None)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)

        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                allow_soft_placement=False,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        with sess.as_default():
            epoch_start = 0
            if args.pretrained_model:
                pretrained_model = args.pretrained_model
                if not os.path.isdir(pretrained_model):
                    print('Restoring pretrained model: {}'.format(
                        pretrained_model))
                    saver.restore(sess, args.pretrained_model)
                else:
                    print('Model directory: {}'.format(pretrained_model))
                    ckpt = tf.train.get_checkpoint_state(pretrained_model)
                    assert ckpt and ckpt.model_checkpoint_path
                    model_path = ckpt.model_checkpoint_path
                    epoch_start = int(
                        model_path[model_path.find('model.ckpt-') + 11:]) + 1
                    print('Checkpoint file: {}'.format(model_path))
                    saver.restore(sess, model_path)

            # if args.save_image_example:
            #     save_image_example(sess, list_ops, args)

            print('Running train.')

            merged = tf.summary.merge_all()
            # The original snippet referenced an undefined `log_dir`; deriving
            # one from model_dir here is an assumption.
            log_dir = os.path.join(model_dir, 'logs')
            train_writer = tf.summary.FileWriter(log_dir, sess.graph)
            for epoch in range(epoch_start, args.max_epoch):
                start = time.time()
                train_L, train_L2 = train(sess, epoch_size, epoch, list_ops)
                print("train time: {}".format(time.time() - start))

                checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                metagraph_path = os.path.join(model_dir, 'model.meta')
                saver.save(sess,
                           checkpoint_path,
                           global_step=epoch,
                           write_meta_graph=False)
                if not os.path.exists(metagraph_path):
                    saver.export_meta_graph(metagraph_path)

                start = time.time()
                test_ME, test_FR, test_loss = test(sess, list_ops, args)
                print("test time: {}".format(time.time() - start))

                summary, _, _, _, _, _ = sess.run([
                    merged,
                    test_mean_error.assign(test_ME),
                    test_failure_rate.assign(test_FR),
                    test_10_loss.assign(test_loss),
                    train_loss.assign(train_L),
                    train_loss_l2.assign(train_L2)
                ])
                train_writer.add_summary(summary, epoch)
Example #4
import model1
import model2
from datetime import datetime

# testing
dt = datetime.strptime("20210523", "%Y%m%d").date()

# test model2
column_list = ['date', 'close', 'real',
               'gold', 'comp', 'spx', 'indu', 'oil',
               'btc_diff', 'gold_diff', 'comp_diff', 'spx_diff', 'indu_diff', 'oil_diff',
               'btc_diffpct', 'gold_diffpct', 'comp_diffpct', 'spx_diffpct', 'indu_diffpct', 'oil_diffpct',
               'timestamp']

input_list = column_list[3:8] # ['gold', 'comp', 'spx', 'indu', 'oil']
input_list.append('timestamp')
print(input_list)
in_dict = {
    input_list[0] : [40],
    input_list[1] : [14000],
    input_list[2] : [43000],
    input_list[3] : [35000],
    input_list[4] : [20],
    input_list[5] : [1621555200000000000]
}
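# Presumably builds/fits model2 on these feature columns, then predicts one row.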
model2.create_model(input_list)
print(f"Model2 Predict Price: {model2.predict(in_dict)}")
Example #5
def main(args):
    debug = (args.debug == 'True')
    print(args)
    np.random.seed(args.seed)
    with tf.Graph().as_default():
        train_dataset, num_train_file = DateSet(args.file_list, args, debug)
        test_dataset, num_test_file = DateSet(args.test_list, args, debug)
        list_ops = {}

        batch_train_dataset = train_dataset.batch(args.batch_size).repeat()
        train_iterator = batch_train_dataset.make_one_shot_iterator()
        train_next_element = train_iterator.get_next()

        batch_test_dataset = test_dataset.batch(args.batch_size).repeat()
        test_iterator = batch_test_dataset.make_one_shot_iterator()
        test_next_element = test_iterator.get_next()

        list_ops['num_train_file'] = num_train_file
        list_ops['num_test_file'] = num_test_file

        model_dir = args.model_dir
        print(model_dir)
        if 'test' in model_dir and debug and os.path.exists(model_dir):
            import shutil
            shutil.rmtree(model_dir)
        assert not os.path.exists(model_dir)
        os.mkdir(model_dir)

        print('Total number of examples: {}'.format(num_train_file))
        print('Test number of examples: {}'.format(num_test_file))
        print('Model dir: {}'.format(model_dir))

        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        list_ops['global_step'] = global_step
        list_ops['train_dataset'] = train_dataset
        list_ops['test_dataset'] = test_dataset
        list_ops['train_next_element'] = train_next_element
        list_ops['test_next_element'] = test_next_element

        epoch_size = num_train_file // args.batch_size
        print('Number of batches per epoch: {}'.format(epoch_size))

        image_batch = tf.placeholder(tf.float32,
                                     shape=(None, args.image_size,
                                            args.image_size, 3),
                                     name='image_batch')
        landmark_batch = tf.placeholder(tf.float32,
                                        shape=(None, 136),
                                        name='landmark_batch')
        attribute_batch = tf.placeholder(tf.int32,
                                         shape=(None, 6),
                                         name='attribute_batch')
        euler_angles_gt_batch = tf.placeholder(tf.float32,
                                               shape=(None, 3),
                                               name='euler_angles_gt_batch')

        list_ops['image_batch'] = image_batch
        list_ops['landmark_batch'] = landmark_batch
        list_ops['attribute_batch'] = attribute_batch
        list_ops['euler_angles_gt_batch'] = euler_angles_gt_batch

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        list_ops['phase_train_placeholder'] = phase_train_placeholder

        print('Building training graph.')
        landmarks_pre, euler_angles_pre = create_model(
            image_batch, landmark_batch, phase_train_placeholder, args)

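        # Head-pose weighting in the PFLD style: per sample, sum (1 - cos(angle
        # error)) over the three Euler angles, so poorly predicted poses weight
        # the landmark loss more heavily.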
        _sum_k = tf.reduce_sum(
            tf.map_fn(lambda x: 1 - tf.cos(abs(x)),
                      euler_angles_gt_batch - euler_angles_pre),
            axis=1)

        # attributes_w_n = tf.to_float(attribute_batch[:, 1:6])
        # mat_ratio = tf.reduce_mean(attributes_w_n, axis=0)
        # # bbb = tf.map_fn(lambda x: 1.0 / x if not x == 0.0 else float(args.batch_size), bbb)
        # mat_ratio = tf.where(tf.equal(mat_ratio, 0.0), (mat_ratio + 1) / float(args.batch_size), 1.0 / mat_ratio)
        # attributes_w_n = tf.reduce_sum(attributes_w_n * mat_ratio, axis=1)

        regularization_loss = tf.add_n(tf.losses.get_regularization_losses())

        # contour_loss = WingLoss(landmark_batch[:, 0:34], landmarks_pre[:, 0:34], 4.0, 0.50)
        # inner_brow_loss = 3*WingLoss(landmark_batch[:, 34:54], landmarks_pre[:, 34:54], 4.0, 0.50)
        # inner_nose_loss = 3*WingLoss(landmark_batch[:, 54:72], landmarks_pre[:, 54:72], 4.0, 0.50)
        # inner_eye_loss = 9*WingLoss(landmark_batch[:, 72:96], landmarks_pre[:, 72:96], 4.0, 0.50)
        # inner_mouth_loss = 15*WingLoss(landmark_batch[:, 96:136], landmarks_pre[:, 96:136], 4.0, 0.50)
        # loss_sum = tf.add_n([contour_loss, inner_brow_loss, inner_nose_loss, inner_eye_loss, inner_mouth_loss])
        # contour_loss = tf.reduce_mean(contour_loss * _sum_k)
        # inner_brow_loss = tf.reduce_mean(inner_brow_loss * _sum_k)
        # inner_nose_loss = tf.reduce_mean(inner_nose_loss * _sum_k)
        # inner_eye_loss = tf.reduce_mean(inner_eye_loss * _sum_k)
        # inner_mouth_loss = tf.reduce_mean(inner_mouth_loss * _sum_k)

        # loss_sum = L2Loss(landmark_batch, landmarks_pre)
        loss_sum = WingLoss(landmark_batch, landmarks_pre, 4.0, 0.50)
        # loss_sum = tf.reduce_mean(loss_sum*_sum_k*attributes_w_n)
        loss_sum = tf.reduce_mean(loss_sum * _sum_k)
        loss_sum += regularization_loss

        # tf.summary.scalar("contour_loss", contour_loss)
        # tf.summary.scalar("inner_brow_loss", inner_brow_loss)
        # tf.summary.scalar("inner_nose_loss", inner_nose_loss)
        # tf.summary.scalar("inner_eye_loss", inner_eye_loss)
        # tf.summary.scalar("inner_mouth_loss", inner_mouth_loss)
        tf.summary.scalar("loss", loss_sum)

        save_params = tf.trainable_variables()
        # variables_to_train = [v for v in save_params if v.name.split('/', 2)[1] == 'fc'
        #                       or v.name.split('/', 1)[0] == 'pfld_conv1'
        #                       or v.name.split('/', 1)[0] == 'pfld_conv2'
        #                       or v.name.split('/', 1)[0] == 'pfld_conv3'
        #                       or v.name.split('/', 1)[0] == 'pfld_conv4'
        #                       or v.name.split('/', 1)[0] == 'pool1'
        #                       or v.name.split('/', 1)[0] == 'Flatten'
        #                       or v.name.split('/', 1)[0] == 'pfld_fc1'
        #                       or v.name.split('/', 1)[0] == 'pfld_fc2']
        train_op, lr_op = train_model(loss_sum, global_step, num_train_file,
                                      args, save_params)

        list_ops['landmarks'] = landmarks_pre
        list_ops['train_op'] = train_op
        list_ops['lr_op'] = lr_op
        list_ops['regularization_loss'] = regularization_loss
        list_ops['loss'] = loss_sum
        # list_ops['contour_loss'] = contour_loss
        # list_ops['inner_brow_loss'] = inner_brow_loss
        # list_ops['inner_nose_loss'] = inner_nose_loss
        # list_ops['inner_eye_loss'] = inner_eye_loss
        # list_ops['inner_mouth_loss'] = inner_mouth_loss

        # from tensorflow.contrib.framework.python.framework import checkpoint_utils
        # var_list = checkpoint_utils.list_variables("./pretrained_models/model.ckpt-51")
        # for v in var_list:
        #     print(v)

        # Load only part of the weights: restore every layer except the final output fc layer.
        # variables_to_restore = [v for v in save_params if v.name.split('/', 2)[1] != 'fc']
        # restorer = tf.train.Saver(variables_to_restore, max_to_keep=None)

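        # Two savers: 'restorer' loads the pretrained weights (optionally a
        # subset, see the commented filter above); 'saver' writes per-epoch
        # checkpoints.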
        restorer = tf.train.Saver(save_params, max_to_keep=None)
        saver = tf.train.Saver(save_params, max_to_keep=None)

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.80)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                allow_soft_placement=False,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        with sess.as_default():
            if args.pretrained_model:
                pretrained_model = args.pretrained_model
                if not os.path.isdir(pretrained_model):
                    print('Restoring pretrained model: {}'.format(
                        pretrained_model))
                    restorer.restore(sess, args.pretrained_model)
                else:
                    print('Model directory: {}'.format(pretrained_model))
                    ckpt = tf.train.get_checkpoint_state(pretrained_model)
                    assert ckpt and ckpt.model_checkpoint_path
                    model_path = ckpt.model_checkpoint_path
                    print('Checkpoint file: {}'.format(model_path))
                    restorer.restore(sess, model_path)

            if args.save_image_example:
                save_image_example(sess, list_ops, args)

            merged = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(args.loss_log, sess.graph)
            sess.graph.finalize()  # guard against new graph nodes being added inside the training loop
            print('Running train.')
            for epoch in range(args.max_epoch):
                train(sess, epoch_size, epoch, args.max_epoch, list_ops,
                      merged, summary_writer)
                checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                metagraph_path = os.path.join(model_dir, 'model.meta')
                saver.save(sess,
                           checkpoint_path,
                           global_step=epoch,
                           write_meta_graph=False)
                if not os.path.exists(metagraph_path):
                    saver.export_meta_graph(metagraph_path)

                test(sess, list_ops, args)
Example #6
def main(args):
    debug = (args.debug == 'True')
    print(args)
    np.random.seed(args.seed)
    with tf.Graph().as_default():
        train_dataset, num_train_file = DateSet(args.file_list, args, debug)
        test_dataset, num_test_file = DateSet(args.test_list, args, debug)
        list_ops = {}

        batch_train_dataset = train_dataset.batch(args.batch_size).repeat()
        train_iterator = batch_train_dataset.make_one_shot_iterator()
        train_next_element = train_iterator.get_next()

        batch_test_dataset = test_dataset.batch(args.batch_size).repeat()
        test_iterator = batch_test_dataset.make_one_shot_iterator()
        test_next_element = test_iterator.get_next()

        list_ops['num_train_file'] = num_train_file
        list_ops['num_test_file'] = num_test_file

        model_dir = args.model_dir
        if 'test' in model_dir and debug and os.path.exists(model_dir):
            import shutil
            shutil.rmtree(model_dir)
        assert not os.path.exists(model_dir)
        os.mkdir(model_dir)

        print('Total number of examples: {}'.format(num_train_file))
        print('Test number of examples: {}'.format(num_test_file))
        print('Model dir: {}'.format(model_dir))

        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        list_ops['global_step'] = global_step
        list_ops['train_dataset'] = train_dataset
        list_ops['test_dataset'] = test_dataset
        list_ops['train_next_element'] = train_next_element
        list_ops['test_next_element'] = test_next_element

        epoch_size = num_train_file // args.batch_size
        print('Number of batches per epoch: {}'.format(epoch_size))

        image_batch = tf.placeholder(tf.float32,
                                     shape=(None, args.image_size,
                                            args.image_size, 3),
                                     name='image_batch')
        landmark_batch = tf.placeholder(tf.float32,
                                        shape=(None, 196),
                                        name='landmark_batch')
        attribute_batch = tf.placeholder(tf.int32,
                                         shape=(None, 6),
                                         name='attribute_batch')
        euler_angles_gt_batch = tf.placeholder(tf.float32,
                                               shape=(None, 3),
                                               name='euler_angles_gt_batch')
        # shape=(None,) makes this a 1-D per-sample weight vector; the original
        # bare (None) would have left the shape fully unspecified.
        w_n = tf.placeholder(tf.float32, shape=(None,), name='w_n')

        list_ops['image_batch'] = image_batch
        list_ops['landmark_batch'] = landmark_batch
        list_ops['attribute_batch'] = attribute_batch
        list_ops['euler_angles_gt_batch'] = euler_angles_gt_batch
        list_ops['w_n'] = w_n

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        list_ops['phase_train_placeholder'] = phase_train_placeholder

        print('Building training graph.')
        # total_loss, landmarks, heatmaps_loss, heatmaps = create_model(
        #     image_batch, landmark_batch, phase_train_placeholder, args)

        landmarks_pre, landmarks_loss, euler_angles_pre = create_model(
            image_batch, landmark_batch, phase_train_placeholder, args)

        L2_loss = tf.add_n(tf.losses.get_regularization_losses())
        _sum_k = tf.reduce_sum(
            tf.map_fn(lambda x: 1 - tf.cos(abs(x)),
                      euler_angles_gt_batch - euler_angles_pre),
            axis=1)

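        # Squared landmark error per sample, scaled by the pose term _sum_k and
        # the per-sample attribute weight w_n, averaged, plus L2 regularization.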
        loss_sum = tf.reduce_sum(tf.square(landmark_batch - landmarks_pre),
                                 axis=1)
        loss_sum = tf.reduce_mean(loss_sum * _sum_k * w_n)
        loss_sum += L2_loss

        train_op, lr_op = train_model(loss_sum, global_step, num_train_file,
                                      args)

        list_ops['landmarks'] = landmarks_pre
        list_ops['L2_loss'] = L2_loss
        list_ops['loss'] = loss_sum
        list_ops['train_op'] = train_op
        list_ops['lr_op'] = lr_op

        save_params = tf.trainable_variables()
        saver = tf.train.Saver(save_params, max_to_keep=None)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                allow_soft_placement=False,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        with sess.as_default():
            if args.pretrained_model:
                pretrained_model = args.pretrained_model
                if not os.path.isdir(pretrained_model):
                    print('Restoring pretrained model: {}'.format(
                        pretrained_model))
                    saver.restore(sess, args.pretrained_model)
                else:
                    print('Model directory: {}'.format(pretrained_model))
                    ckpt = tf.train.get_checkpoint_state(pretrained_model)
                    assert ckpt and ckpt.model_checkpoint_path
                    model_path = ckpt.model_checkpoint_path
                    print('Checkpoint file: {}'.format(model_path))
                    saver.restore(sess, model_path)

            if args.save_image_example:
                save_image_example(sess, list_ops, args)

            print('Running train.')
            for epoch in range(args.max_epoch):
                train(sess, epoch_size, epoch, list_ops)
                checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                metagraph_path = os.path.join(model_dir, 'model.meta')
                saver.save(sess,
                           checkpoint_path,
                           global_step=epoch,
                           write_meta_graph=False)
                if not os.path.exists(metagraph_path):
                    saver.export_meta_graph(metagraph_path)

                test(sess, list_ops, args)