import numpy as np
import tensorflow as tf

# NOTE: FLAGS, raw_data_name, label_data_name, TestingConfig, model_lstm,
# and the helpers used below are assumed to be defined at module level in
# the original repository; they are not shown in these excerpts.


def main(_):
    with tf.get_default_graph().as_default() as graph:

        # read data [amount, num_steps, mileage, dfswt] == [None, 10, 28, 5]
        test_raw_data = np.load(FLAGS.data_dir + raw_data_name)
        test_label_data = np.load(FLAGS.data_dir + label_data_name)

        # select flow from [density, flow, speed, weekday, time]
        test_raw_data = test_raw_data[:, :, :, 1]
        temp = test_label_data[:, :, :]  # keep every channel for the weekday checks below
        test_label_data = test_label_data[:, :, 1]

        # placeholder
        X_ph = tf.placeholder(
            dtype=tf.float32,
            shape=[FLAGS.batch_size, FLAGS.num_steps, FLAGS.vd_amount],
            name='input_data')
        Y_ph = tf.placeholder(dtype=tf.float32,
                              shape=[FLAGS.batch_size, FLAGS.vd_amount],
                              name='label_data')

        # config setting
        config = TestingConfig()
        config.show()

        # model (is_training=True is kept from the original source; for pure
        # inference this would typically be False)
        model = model_lstm.TFPModel(config, is_training=True)
        logits_op = model.inference(inputs=X_ph)
        losses_op = model.losses(logits=logits_op, labels=Y_ph)
        mape_op = model.MAPE(logits=logits_op, labels=Y_ph)

        # summary
        labels_summary_writer = tf.summary.FileWriter(FLAGS.log_dir +
                                                      'observation',
                                                      graph=graph)
        logits_summary_writer = tf.summary.FileWriter(FLAGS.log_dir +
                                                      'prediction',
                                                      graph=graph)

        init = tf.global_variables_initializer()
        # saver
        saver = tf.train.Saver()

        # Session
        with tf.Session() as sess:
            sess.run(init)

            saver.restore(sess, FLAGS.checkpoints_dir + '-99')
            print("Successully restored!!")

            # testing
            tp1_result = None
            tp2_result = None
            tp3_result = None
            next_test_raw_data = None
            test_loss_sum = 0.0
            test_mape_sum = 0.0
            flg = True
            counter = 0
            for i in range(len(test_label_data) - 1):

                # temp[i][0][3] is the weekday channel: evaluate only samples
                # from weekday 2, and stop once weekday 3 is reached
                if temp[i][0][3] == 3:
                    flg = False

                if flg and temp[i][0][3] == 2:
                    counter += 1
                    if tp1_result is None or tp2_result is None or tp3_result is None:
                        current_X_batch = test_raw_data[i:i + 1]
                    else:
                        current_X_batch = next_test_raw_data

                    current_Y_batch = test_label_data[i:i + 1]
                    predicted_value, losses_value, mape_value = sess.run(
                        [logits_op, losses_op, mape_op],
                        feed_dict={
                            X_ph: current_X_batch,
                            Y_ph: current_Y_batch
                        })
                    test_loss_sum += losses_value
                    test_mape_sum += mape_value

                    predicted_value_np = np.array(predicted_value)
                    predicted_value_np = np.reshape(predicted_value_np,
                                                    [1, 1, FLAGS.vd_amount])

                    tp1_result = tp2_result
                    tp2_result = tp3_result
                    tp3_result = predicted_value_np

                    if tp1_result is not None and tp2_result is not None and tp3_result is not None:
                        # overwrite the last three time steps of the next
                        # input window with the three most recent predictions
                        next_test_raw_data = np.concatenate(
                            [test_raw_data[i + 1:i + 2, :-3, :],
                             tp1_result, tp2_result, tp3_result],
                            axis=1)

                    for vd_idx in range(FLAGS.vd_amount):
                        labels_scalar_summary = tf.Summary()
                        labels_scalar_summary.value.add(
                            simple_value=current_Y_batch[0][vd_idx],
                            tag="cmp" + str(vd_idx))
                        labels_summary_writer.add_summary(
                            labels_scalar_summary, global_step=i)
                        labels_summary_writer.flush()

                        logits_scalar_summary = tf.Summary()
                        logits_scalar_summary.value.add(
                            simple_value=predicted_value[0][vd_idx],
                            tag="cmp" + str(vd_idx))
                        logits_summary_writer.add_summary(
                            logits_scalar_summary, global_step=i)
                        logits_summary_writer.flush()

            # test mean loss
            test_mean_loss = test_loss_sum / counter
            test_mean_mape = test_mape_sum / counter

            print("testing mean loss: ", test_mean_loss)
            print("testing mean mape: ", test_mean_mape * 100.0, "%")
Example 2
def main(_):
    with tf.get_default_graph().as_default() as graph:
        global_steps = tf.train.get_or_create_global_step(graph=graph)

        # read data
        raw_data_t = np.load(FLAGS.data_dir + raw_data_name)
        label_data_t = np.load(FLAGS.data_dir + label_data_name)

        # select flow from [density, flow, speed, weekday, time]
        raw_data_t = raw_data_t[:, :, :, 1]
        label_data_t = label_data_t[:, 0:14, 1]

        # pack inputs and labels side-by-side so that one in-place shuffle of
        # `concat` permutes both in unison (raw_data and label_data below are
        # views into `concat`, not copies)
        concat = np.c_[raw_data_t.reshape(len(raw_data_t), -1),
                       label_data_t.reshape(len(label_data_t), -1)]
        raw_data = concat[:, :raw_data_t.size // len(raw_data_t)].reshape(
            raw_data_t.shape)
        label_data = concat[:, raw_data_t.size // len(raw_data_t):].reshape(
            label_data_t.shape)
        del raw_data_t
        del label_data_t

        np.random.shuffle(concat)

        train_raw_data_t, test_raw_data = np.split(
            raw_data, [raw_data.shape[0] * 8 // 9])
        train_label_data_t, test_label_data = np.split(
            label_data, [label_data.shape[0] * 8 // 9])

        # re-pack the training split the same way; the per-epoch shuffle in
        # the epoch loop below permutes train_raw_data and train_label_data
        # together through this shared buffer
        concat = np.c_[train_raw_data_t.reshape(len(train_raw_data_t), -1),
                       train_label_data_t.reshape(len(train_label_data_t), -1)]
        train_raw_data = concat[:, :train_raw_data_t.size //
                                len(train_raw_data_t)].reshape(
                                    train_raw_data_t.shape)
        train_label_data = concat[:, train_raw_data_t.size //
                                  len(train_raw_data_t):].reshape(
                                      train_label_data_t.shape)
        del train_raw_data_t
        del train_label_data_t

        # placeholder
        X_ph = tf.placeholder(
            dtype=tf.float32,
            shape=[FLAGS.batch_size, FLAGS.num_steps, FLAGS.vd_amount],
            name='input_data')
        Y_ph = tf.placeholder(dtype=tf.float32,
                              shape=[FLAGS.batch_size, FLAGS.vd_amount // 2],  # // so the shape entry is an int
                              name='label_data')

        # config setting
        config = TestingConfig()
        config.show()

        # model
        model = model_lstm.TFPModel(config, is_training=True)
        logits_op = model.inference(inputs=X_ph)
        loss_op = model.losses(logits=logits_op, labels=Y_ph)
        train_op = model.train(loss=loss_op, global_step=global_steps)
        mape_op = model.MAPE(logits=logits_op, labels=Y_ph)

        # summary
        merged_op = tf.summary.merge_all()
        train_summary_writer = tf.summary.FileWriter(FLAGS.log_dir + 'train',
                                                     graph=graph)
        test_summary_writer = tf.summary.FileWriter(FLAGS.log_dir + 'test',
                                                    graph=graph)

        init = tf.global_variables_initializer()
        # saver
        saver = tf.train.Saver()

        # Session
        with tf.Session(config=tf.ConfigProto(
                log_device_placement=False)) as sess:
            sess.run(init)
            for epoch_steps in range(FLAGS.total_epoches):
                # shuffle inputs and labels in unison via the shared buffer
                np.random.shuffle(concat)

                # training
                train_loss_sum = 0.0
                train_batches_amount = len(train_raw_data) // FLAGS.batch_size
                for i in range(train_batches_amount):
                    temp_id = i * FLAGS.batch_size
                    current_X_batch = train_raw_data[temp_id:temp_id +
                                                     FLAGS.batch_size]
                    current_Y_batch = train_label_data[temp_id:temp_id +
                                                       FLAGS.batch_size]
                    summary, _, loss_value, steps = sess.run(
                        [merged_op, train_op, loss_op, global_steps],
                        feed_dict={X_ph: current_X_batch,
                                   Y_ph: current_Y_batch})
                    train_summary_writer.add_summary(summary,
                                                     global_step=steps)
                    train_loss_sum += loss_value

                # testing
                test_loss_sum = 0.0
                mape_loss_sum = 0.0
                test_batches_amount = len(test_raw_data) // FLAGS.batch_size
                for i in range(test_batches_amount):
                    temp_id = i * FLAGS.batch_size
                    current_X_batch = test_raw_data[temp_id:temp_id +
                                                    FLAGS.batch_size]
                    current_Y_batch = test_label_data[temp_id:temp_id +
                                                      FLAGS.batch_size]
                    test_loss_value, mape_loss_value = sess.run(
                        [loss_op, mape_op],
                        feed_dict={
                            X_ph: current_X_batch,
                            Y_ph: current_Y_batch
                        })
                    test_loss_sum += test_loss_value
                    mape_loss_sum += mape_loss_value

                # train mean epoch loss
                train_mean_loss = train_loss_sum / train_batches_amount
                train_scalar_summary = tf.Summary()
                train_scalar_summary.value.add(simple_value=train_mean_loss,
                                               tag="mean loss")
                train_summary_writer.add_summary(train_scalar_summary,
                                                 global_step=steps)
                train_summary_writer.flush()

                # test mean epoch loss
                test_mean_loss = test_loss_sum / test_batches_amount
                mape_mean = (mape_loss_sum / test_batches_amount) * 100.0
                test_scalar_summary = tf.Summary()
                test_scalar_summary.value.add(simple_value=test_mean_loss,
                                              tag="mean loss")
                test_scalar_summary.value.add(simple_value=mape_mean,
                                              tag="mean mape (%)")
                test_summary_writer.add_summary(test_scalar_summary,
                                                global_step=steps)
                test_summary_writer.flush()

                print("ephoches: ", epoch_steps, "trainng loss: ",
                      train_mean_loss, "testing loss: ", test_mean_loss,
                      "testing mape: ", mape_mean, "%")

                if (epoch_steps + 1) % 50 == 0:
                    # Save the variables to disk.
                    save_path = saver.save(sess,
                                           FLAGS.checkpoints_dir,
                                           global_step=epoch_steps)
                    print("Model saved in file: %s" % save_path)
Example 3
def main(_):
    with tf.get_default_graph().as_default() as graph:

        # read data [amount, num_steps, mileage, dfswt] == [None, 10, 28, 5]
        test_raw_data = np.load(FLAGS.data_dir + raw_data_name)
        test_label_data = np.load(FLAGS.data_dir + label_data_name)

        # select flow from [density, flow, speed, weekday, time, day]
        test_raw_data = test_raw_data[:, :, :, 1]
        test_label_all = test_label_data[:, 0:14, :]
        test_label_data = test_label_data[:, 0:14, 1]

        # placeholder
        X_ph = tf.placeholder(dtype=tf.float32,
                              shape=[None, FLAGS.num_steps, FLAGS.vd_amount],
                              name='input_data')
        Y_ph = tf.placeholder(dtype=tf.float32,
                              shape=[None, FLAGS.vd_amount // 2],
                              name='label_data')
        Z_ph = tf.placeholder(dtype=tf.float32,
                              shape=[None, FLAGS.vd_amount],
                              name='shift_input_data')

        # config setting
        config = TestingConfig()
        config.show()

        # model
        model = model_lstm.TFPModel(config, is_training=True)
        logits_op = model.inference(inputs=X_ph)
        losses_op = model.losses(logits=logits_op, labels=Y_ph)
        # several loss variants (L1/L2, reduced or per-sample); only
        # losses_op, l1_losses_op, and mape_op are evaluated below
        l1_loss_op = model.losses(logits=logits_op,
                                  labels=Y_ph,
                                  is_squared=False,
                                  is_reduction=True)
        l1_losses_op = model.losses(logits=logits_op,
                                    labels=Y_ph,
                                    is_squared=False,
                                    is_reduction=False)

        l2_loss_op = model.losses(logits=logits_op,
                                  labels=Y_ph,
                                  is_squared=True,
                                  is_reduction=True)
        l2_losses_op = model.losses(logits=logits_op,
                                    labels=Y_ph,
                                    is_squared=True,
                                    is_reduction=False)

        mape_op = model.MAPE(logits=logits_op, labels=Y_ph)

        init = tf.global_variables_initializer()

        # saver
        saver = tf.train.Saver()

        # Session
        with tf.Session() as sess:
            sess.run(init)

            saver.restore(sess, FLAGS.checkpoints_dir + '-99')
            print("Successully restored!!")

            special_points = []
            losses_value_all = []
            i = 0
            while i < len(test_label_data) - FLAGS.batch_size:

                data = test_raw_data[i:i + FLAGS.batch_size]
                label = test_label_data[i:i + FLAGS.batch_size]

                predicted_value, losses_value = sess.run(
                    [logits_op, l1_losses_op],
                    feed_dict={
                        X_ph: data,
                        Y_ph: label
                    })

                # for ptr, value in enumerate(losses_value):
                #     for item in value:
                #         if item > 400:
                #             special_points.append(ptr+i)
                #             break

                # print("ephoches: ", i, "trainng loss: ", losses_value)

                losses_value_all.append(losses_value)
                i += FLAGS.batch_size
            print("save loss.. successful XD")

            print(np.array(losses_value_all).shape)
            losses_value_all = np.concatenate(losses_value_all, axis=0)
            np.save("loss_lstm_" + raw_data_name, losses_value_all)

            # special_day = []
            # for ptr in test_label_all:
            #     # speed
            #     for vd in ptr:
            #         if vd[2] < 20 and ( the_time(vd[4]) > 800 and the_time(vd[4]) < 2400 ):
            #             if len(special_day) > 0 and special_day[-1] == vd[5]:
            #                 continue
            #             special_day.append(vd[5])
            #             break
            # np.save("special_day",special_day)

            if FLAGS.day is None:
                # testing all data
                predicted_value, losses_value, mape_value = sess.run(
                    [logits_op, losses_op, mape_op],
                    feed_dict={
                        X_ph: test_raw_data,
                        Y_ph: test_label_data
                    })

                print("testing mean loss: ", losses_value)
                print("testing mean mape: ", mape_value * 100.0, "%")
            else:
                # summary
                density_summary_writer = tf.summary.FileWriter(FLAGS.log_dir +
                                                               'density')
                flow_summary_writer = tf.summary.FileWriter(FLAGS.log_dir +
                                                            'flow')
                speed_summary_writer = tf.summary.FileWriter(FLAGS.log_dir +
                                                             'speed')

                predict_flow_summary_writer = tf.summary.FileWriter(
                    FLAGS.log_dir + 'prediction_flow')
                # predict_speed_summary_writer = tf.summary.FileWriter(
                #     FLAGS.log_dir + 'prediction_speed')
                losses_summary_writer = tf.summary.FileWriter(FLAGS.log_dir +
                                                              'l2_losses')

                # draw specific day
                test_loss_sum = 0.0
                test_mape_sum = 0.0
                amount_counter = 0

                for i, _ in enumerate(test_label_data):
                    if test_label_all[i][0][5] == FLAGS.day:
                        interval_id = 0
                        offset = i
                        # walk through every interval of one day (1440 minutes)
                        while interval_id < (1440 // FLAGS.interval):
                            if test_label_all[offset][0][
                                    4] // FLAGS.interval != interval_id:
                                # no sample for this interval: write zeros as
                                # placeholders so the curves stay aligned
                                for vd_idx in range(FLAGS.vd_amount // 2):

                                    write_data(density_summary_writer, 0,
                                               test_label_all[i][0][3], vd_idx,
                                               interval_id * FLAGS.interval)
                                    write_data(flow_summary_writer, 0,
                                               test_label_all[i][0][3], vd_idx,
                                               interval_id * FLAGS.interval)
                                    write_data(speed_summary_writer, 0,
                                               test_label_all[i][0][3], vd_idx,
                                               interval_id * FLAGS.interval)

                                    write_data(predict_flow_summary_writer, 0,
                                               test_label_all[i][0][3], vd_idx,
                                               interval_id * FLAGS.interval)
                                    # write_data(predict_speed_summary_writer, 0, test_label_all[i][0][3], vd_idx, interval_id*FLAGS.interval)
                                    write_data(losses_summary_writer, 0,
                                               test_label_all[i][0][3], vd_idx,
                                               interval_id * FLAGS.interval)
                            else:
                                offset += 1
                                amount_counter += 1
                                current_X_batch = test_raw_data[offset:offset +
                                                                1]
                                current_Y_batch = test_label_data[
                                    offset:offset + 1]
                                predicted_value, losses_value, mape_value = sess.run(
                                    [logits_op, l1_losses_op, mape_op],
                                    feed_dict={
                                        X_ph: current_X_batch,
                                        Y_ph: current_Y_batch
                                    })
                                test_loss_sum += losses_value
                                test_mape_sum += mape_value

                                for vd_idx in range(FLAGS.vd_amount // 2):

                                    write_data(
                                        density_summary_writer,
                                        test_label_all[offset][vd_idx][0],
                                        test_label_all[i][0][3], vd_idx,
                                        interval_id * FLAGS.interval)
                                    write_data(
                                        flow_summary_writer,
                                        test_label_all[offset][vd_idx][1],
                                        test_label_all[i][0][3], vd_idx,
                                        interval_id * FLAGS.interval)
                                    write_data(
                                        speed_summary_writer,
                                        test_label_all[offset][vd_idx][2],
                                        test_label_all[i][0][3], vd_idx,
                                        interval_id * FLAGS.interval)

                                    write_data(predict_flow_summary_writer,
                                               predicted_value[0][vd_idx],
                                               test_label_all[i][0][3], vd_idx,
                                               interval_id * FLAGS.interval)
                                    # write_data(predict_speed_summary_writer, predicted_value[0][vd_idx][1], test_label_all[i][0][3], vd_idx, interval_id*FLAGS.interval)
                                    write_data(losses_summary_writer,
                                               losses_value,
                                               test_label_all[i][0][3], vd_idx,
                                               interval_id * FLAGS.interval)

                            interval_id += 1
                            # stop once the time-of-day value wraps around
                            # (i.e. the next day's data has been reached)
                            if test_label_all[offset][0][
                                    4] < 100 and interval_id > 200:
                                break

                        print("WEEK:", test_label_all[i][0][3])
                        break

                # test mean loss
                test_mean_loss = test_loss_sum / amount_counter
                test_mean_mape = test_mape_sum / amount_counter

                print("testing mean loss: ", test_mean_loss)
                print("testing mean mape: ", test_mean_mape * 100.0, "%")
Example 4
def main(_):
    with tf.get_default_graph().as_default() as graph:

        # read data [amount, num_steps, mileage, dfswt] == [None, 10, 28, 5]
        test_raw_data = np.load(FLAGS.data_dir + raw_data_name)
        test_label_data = np.load(FLAGS.data_dir + label_data_name)

        # keep all five input channels here; select flow for the labels only
        temp = test_label_data[:, :, :]
        test_label_data = test_label_data[:, :, 1]

        # placeholder
        X_ph = tf.placeholder(
            dtype=tf.float32,
            shape=[FLAGS.batch_size, FLAGS.num_steps, FLAGS.vd_amount, 5],
            name='input_data')
        Y_ph = tf.placeholder(dtype=tf.float32,
                              shape=[FLAGS.batch_size, FLAGS.vd_amount],
                              name='label_data')

        # config setting
        config = TestingConfig()
        config.show()

        # model
        model = model_lstm.TFPModel(config, is_training=True)
        logits_op = model.inference(inputs=X_ph)
        losses_op = model.losses(logits=logits_op, labels=Y_ph)
        mape_op = model.MAPE(logits=logits_op, labels=Y_ph)

        # summary
        labels_summary_writer = tf.summary.FileWriter(FLAGS.log_dir +
                                                      'observation',
                                                      graph=graph)
        logits_summary_writer = tf.summary.FileWriter(FLAGS.log_dir +
                                                      'prediction',
                                                      graph=graph)

        init = tf.global_variables_initializer()
        # saver
        saver = tf.train.Saver()

        # Session
        with tf.Session() as sess:
            sess.run(init)

            saver.restore(sess, FLAGS.checkpoints_dir + '-99')
            print("Successully restored!!")

            # testing
            test_loss_sum = 0.0
            test_mape_sum = 0.0
            flg = True  # only used by the commented-out weekday filter below
            for i, _ in enumerate(test_label_data):
                current_X_batch = test_raw_data[i:i + 1]
                current_Y_batch = test_label_data[i:i + 1]
                predicted_value, losses_value, mape_value = sess.run(
                    [logits_op, losses_op, mape_op],
                    feed_dict={
                        X_ph: current_X_batch,
                        Y_ph: current_Y_batch
                    })
                test_loss_sum += losses_value
                test_mape_sum += mape_value

                # if temp[i][0][3] == 4:
                #     flg = False

                # if flg and temp[i][0][3] == 3:
                #     # if i < 60 * 24 * 4:

                #     current_X_batch = test_raw_data[i:i + 1]
                #     current_Y_batch = test_label_data[i:i + 1]
                #     predicted_value, losses_value, mape_value = sess.run([logits_op, losses_op, mape_op], feed_dict={
                #         X_ph: current_X_batch, Y_ph: current_Y_batch})
                #     test_loss_sum += losses_value
                #     test_mape_sum += mape_value

                #     for vd_idx in range(FLAGS.vd_amount):
                #         labels_scalar_summary = tf.Summary()
                #         labels_scalar_summary.value.add(
                #             simple_value=current_Y_batch[0][vd_idx], tag="cmp" + str(vd_idx))
                #         labels_summary_writer.add_summary(
                #             labels_scalar_summary, global_step=i)
                #         labels_summary_writer.flush()

                #         logits_scalar_summary = tf.Summary()
                #         logits_scalar_summary.value.add(
                #             simple_value=predicted_value[0][vd_idx], tag="cmp" + str(vd_idx))
                #         logits_summary_writer.add_summary(
                #             logits_scalar_summary, global_step=i)
                #         logits_summary_writer.flush()

            # test mean loss
            test_mean_loss = test_loss_sum / len(test_label_data)
            test_mean_mape = test_mape_sum / len(test_label_data)

            print("testing mean loss: ", test_mean_loss)
            print("testing mean mape: ", test_mean_mape * 100.0, "%")
Example 5
def main(_):
    with tf.get_default_graph().as_default() as graph:

        # read data [amount, num_steps, mileage, dfswt] == [None, 10, 28, 5]
        test_raw_data = np.load(FLAGS.data_dir + raw_data_name)
        test_label_data = np.load(FLAGS.data_dir + label_data_name)

        # select flow from [density, flow, speed, weekday, time, day]
        test_raw_data = test_raw_data[:, :, :, 1]
        test_label_all = test_label_data[:, :, :]
        test_label_data = test_label_data[:, :, 1]

        # placeholder
        X_ph = tf.placeholder(dtype=tf.float32,
                              shape=[None, FLAGS.num_steps, FLAGS.vd_amount],
                              name='input_data')
        Y_ph = tf.placeholder(dtype=tf.float32,
                              shape=[None, FLAGS.vd_amount],
                              name='label_data')

        # config setting
        config = TestingConfig()
        config.show()

        # model
        model = model_lstm.TFPModel(config, is_training=True)
        logits_op = model.inference(inputs=X_ph)
        losses_op = model.losses(logits=logits_op, labels=Y_ph)
        mape_op = model.MAPE(logits=logits_op, labels=Y_ph)

        init = tf.global_variables_initializer()
        # saver
        saver = tf.train.Saver()

        # np saver (unused in this excerpt)
        loss_saver = []

        # Session
        with tf.Session() as sess:
            sess.run(init)

            saver.restore(sess, FLAGS.checkpoints_dir + '-99')
            print("Successully restored!!")

            if FLAGS.day is None:
                pass  # this variant only handles a specific day
            else:
                # testing data of specific day
                # summary
                predict_loss_summary_writer = tf.summary.FileWriter(
                    FLAGS.log_dir + 'predicted_loss', graph=graph)
                # draw specific day
                amount_counter = 0
                for i, _ in enumerate(test_label_data):
                    if test_label_all[i][0][5] == FLAGS.day:
                        interval_id = 0
                        offset = i
                        while interval_id < (1440 // FLAGS.interval):
                            if test_label_all[offset][0][
                                    4] // FLAGS.interval != interval_id:
                                for vd_idx in range(FLAGS.vd_amount):
                                    predict_losses_scalar_summary = tf.Summary()
                                    predict_losses_scalar_summary.value.add(
                                        simple_value=0,
                                        tag="DAY:" + the_date(FLAGS.day) +
                                        "WEEK: " +
                                        str(test_label_all[i][0][3]) + " VD:" +
                                        str(vd_idx))
                                    predict_loss_summary_writer.add_summary(
                                        predict_losses_scalar_summary,
                                        global_step=the_time(interval_id *
                                                             FLAGS.interval))
                                    predict_loss_summary_writer.flush()
                            else:
                                offset += 1
                                amount_counter += 1
                                current_X_batch = test_raw_data[offset:offset +
                                                                1]
                                current_Y_batch = test_label_data[
                                    offset:offset + 1]
                                predicted_value = sess.run(
                                    logits_op,
                                    feed_dict={X_ph: current_X_batch})

                                for vd_idx in range(FLAGS.vd_amount):
                                    predict_losses_scalar_summary = tf.Summary()
                                    predict_losses_scalar_summary.value.add(
                                        simple_value=predicted_value[0]
                                        [vd_idx],
                                        tag="DAY:" + the_date(FLAGS.day) +
                                        "WEEK: " +
                                        str(test_label_all[i][0][3]) + " VD:" +
                                        str(vd_idx))
                                    predict_loss_summary_writer.add_summary(
                                        predict_losses_scalar_summary,
                                        global_step=the_time(interval_id *
                                                             FLAGS.interval))
                                    predict_loss_summary_writer.flush()

                            interval_id += 1
                            # stop once the time-of-day value wraps around
                            # (i.e. the next day's data has been reached)
                            if test_label_all[offset][0][
                                    4] < 100 and interval_id > 200:
                                break

                        print("WEEK:", test_label_all[i][0][3])
                        break
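the_time and the_date are repository helpers that are not included in these excerpts. Judging from their use (time-channel values compared against 800 and 2400, interval offsets turned into global steps), they appear to map a minute-of-day offset to an HHMM-style integer and a day index to a printable label. A purely hypothetical reconstruction, for illustration only:

def the_time(minute_of_day):
    # hypothetical: minutes since midnight -> HHMM-style integer (90 -> 130)
    return (minute_of_day // 60) * 100 + minute_of_day % 60

def the_date(day_index):
    # hypothetical: day index -> label used inside the summary tags
    return 'day%03d' % day_index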