Example n. 1
def model_h(data):
    data_group = data.reset_index(drop=True)
    data_ = {
        tf.contrib.timeseries.TrainEvalFeatures.TIMES:
        np.array(data_group.index),
        tf.contrib.timeseries.TrainEvalFeatures.VALUES:
        np.array(data_group.values)
    }
    reader = NumpyReader(data_)
    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
        reader, batch_size=data_group.shape[0], window_size=10)
    LSTM = ts_estimators.TimeSeriesRegressor(
        model=TS._LSTMModel(num_features=1, num_units=128),
        optimizer=tf.train.AdamOptimizer(0.001))
    LSTM.train(input_fn=train_input_fn, steps=500)

    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
    evaluation = LSTM.evaluate(input_fn=evaluation_input_fn, steps=3)

    (predictions, ) = tuple(
        LSTM.predict(input_fn=tf.contrib.timeseries.
                     predict_continuation_input_fn(evaluation, steps=5)))

    observed_times = evaluation["times"][0]
    observed = evaluation["observed"][0, :, :]
    evaluated_times = evaluation["times"][0]
    evaluated = evaluation["mean"][0]
    predicted_times = predictions['times']
    predicted = predictions["mean"]

    return observed, evaluated, predicted
def main(_):
    # 1. Define x and y
    x = np.array(range(1000))
    noise = np.random.uniform(-0.2, 0.2, 1000)
    y = np.sin(np.pi * x / 100) + x / 200. + noise
    plt.plot(x, y)
    plt.savefig('2_timeseries_y.jpg')

    # 2. Build the data dict from x and y
    data = {
        tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
        tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,
    }

    # 3. Create the reader and generate batch data
    reader = NumpyReader(data)
    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
        reader, batch_size=16, window_size=40)  # each batch holds 16 sequences, each of length 40

    # 4. Define the AR model (autoregressive model), a basic statistical approach to time series modeling
    ar = tf.contrib.timeseries.ARRegressor(
        periodicities=200,  # the regular period of the series
        input_window_size=30,  # length of the input window
        output_window_size=10,  # length of the output window,
        # where window_size = input_window_size + output_window_size
        num_features=1,  # dimensionality of the value observed at each time point
        loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS
        # loss function; two choices: NORMAL_LIKELIHOOD_LOSS and SQUARED_LOSS
    )

    # 5. Train the model
    ar.train(input_fn=train_input_fn, steps=6000)

    # 6. Evaluation: run the trained model over the original training set to inspect the fit
    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
    evaluation = ar.evaluate(input_fn=evaluation_input_fn, steps=1)
    # keys of evaluation: ['covariance', 'loss', 'mean', 'observed', 'start_tuple', 'times', 'global_step']
    # mean: the 970 fitted values; times: their corresponding time points; loss: total loss;
    # start_tuple: used for later prediction; holds the outputs and time points of the last 30 steps

    # 7. Prediction
    (predictions, ) = tuple(
        ar.predict(input_fn=tf.contrib.timeseries.
                   predict_continuation_input_fn(evaluation, steps=250)))
    # after step 1000, predict 250 further time points; the values are stored in predictions['mean']

    plt.figure(figsize=(15, 5))
    plt.plot(data['times'].reshape(-1),
             data['values'].reshape(-1),
             label='origin')
    plt.plot(evaluation['times'].reshape(-1),
             evaluation['mean'].reshape(-1),
             label='evaluation')
    plt.plot(predictions['times'].reshape(-1),
             predictions['mean'].reshape(-1),
             label='prediction')
    plt.xlabel('time_step')
    plt.ylabel('values')
    plt.legend(loc=4)
    plt.savefig('predict_result.jpg')
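Note: the snippets on this page mostly omit their imports. A minimal common preamble, with the aliases inferred from how the names are used in the examples that do show imports (TF 1.x only; tf.contrib.timeseries does not exist in TF 2.x):

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import NumpyReader
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model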
Example n. 3
 def tensor_flow_go(self, net_list):
     predicted_last = pd.DataFrame(columns=['UE', 'erab', 'flow', 'handover', 
                                            'rrc', 'net_num'])
     for cell in net_list: 
         y_zhunbei = self.zhibiao_need[self.zhibiao_need.net_num == cell][['time',
                                 'UE', 'erab', 'flow', 'handover', 'rrc']]
         y = np.array(y_zhunbei[['UE', 'erab', 'flow', 'handover',
                              'rrc']])
         x = np.array(range(len(y)))
   
         data = {tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
                 tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,}
         reader = NumpyReader(data)
   
         train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(reader, batch_size=10, 
                                                                    window_size=48)
         ar = tf.contrib.timeseries.ARRegressor(
                 periodicities=48, input_window_size=24, output_window_size=24,
                 num_features=5, loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
 
         ar.train(input_fn=train_input_fn, steps=700)
 
         evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
 # keys of evaluation: ['covariance', 'loss', 'mean', 'observed', 'start_tuple', 'times', 'global_step']
         evaluation = ar.evaluate(input_fn=evaluation_input_fn, steps=1)
         (predictions,) = tuple(ar.predict(
                 input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
                         evaluation, steps=24)))
         prediction = predictions['mean']
   
         predicted_out = DataFrame(prediction)
         predicted_out.columns = ['UE', 'erab', 'flow', 'handover', 'rrc']      
         predicted_out['net_num'] = cell
         predicted_last = predicted_last.append(predicted_out)
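A note on the accumulation pattern above: DataFrame.append inside a loop copies the frame on every iteration and has since been removed from pandas. A sketch of the equivalent collect-then-concat form, assuming the same per-cell predicted_out frames:

import pandas as pd

frames = []
for cell in net_list:
    # ... train, evaluate and predict as above, producing predicted_out ...
    frames.append(predicted_out)
predicted_last = pd.concat(frames, ignore_index=True)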
Example n. 4
def main(_):
    x = np.array(range(1000))
    noise = np.random.uniform(-0.2, 0.2, 1000)
    y = np.sin(np.pi * x / 100) + x / 200. + noise
    plt.plot(x, y)
    plt.savefig('timeseries_y.jpg')

    data = {
        tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
        tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,
    }

    reader = NumpyReader(data)

    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
        reader, batch_size=16, window_size=40)

    ar = tf.contrib.timeseries.ARRegressor(
        periodicities=200, input_window_size=30, output_window_size=10,
        num_features=1,
        loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)

    ar.train(input_fn=train_input_fn, steps=6000)

    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
    # keys of evaluation: ['covariance', 'loss', 'mean', 'observed', 'start_tuple', 'times', 'global_step']
    evaluation = ar.evaluate(input_fn=evaluation_input_fn, steps=1)

    (predictions,) = tuple(ar.predict(
        input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
            evaluation, steps=250)))
    def train_predict(self, datax, datay, predict_len):
        data = {
            tf.contrib.timeseries.TrainEvalFeatures.TIMES: datax,
            tf.contrib.timeseries.TrainEvalFeatures.VALUES: datay,
        }
        reader = NumpyReader(data)
        train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
            reader, batch_size=128, window_size=8)

        estimator = ts_estimators.TimeSeriesRegressor(
            model=_LSTMModel(num_features=1, num_units=28),
            optimizer=tf.train.AdamOptimizer(0.001))

        estimator.train(input_fn=train_input_fn, steps=2000)
        evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
        evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
        # Predict starting after the evaluation
        (predictions, ) = tuple(
            estimator.predict(
                input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
                    evaluation, steps=predict_len)))
        # observations (times is the corresponding time index)
        observed_times = evaluation["times"][0]
        observed = evaluation["observed"][0, :, :]
        # evaluation
        evaluated_times = evaluation["times"][0]
        evaluated = evaluation["mean"][0]
        # prediction
        predicted_times = predictions['times']
        predicted = predictions["mean"]
        # convert to a list
        predicts = (predictions['mean'].T.tolist())
        return predicts
def main(_):
    # build the data
    x = np.array(range(1000))
    noise = np.random.uniform(-0.2, 0.2, 1000)
    y = np.sin(np.pi * x / 100) + x / 200. + noise
    plt.plot(x, y)
    plt.savefig('timeseries_y.jpg')

    # read the data
    data = {
        tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
        tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,
    }
    reader = NumpyReader(data)
    # create training batches
    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(reader,
                                                               batch_size=16,
                                                               window_size=40)

    # model the time series with an AR model
    ar = tf.contrib.timeseries.ARRegressor(
        periodicities=200,
        input_window_size=30,
        output_window_size=10,
        num_features=1,
        loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS
    )  # The AR model takes the previous 30 time pairs as input and outputs the
    # next 10; their sum equals the length of one sequence in a training batch.
    # num_features is the dimensionality of each time point's value;
    # periodicities is the period of the time series.

    # train the model
    ar.train(input_fn=train_input_fn, steps=6000)  # train for 6000 steps

    # validate the trained model against the training data
    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)

    evaluation = ar.evaluate(input_fn=evaluation_input_fn, steps=1)
    # evaluation has the keys [covariance, loss, mean, observed, start_tuple, times, global_step].
    # evaluation['mean'] holds the fitted values (of the 1000 time pairs, the first 30 are
    # inputs and the remaining 970 are predictions); evaluation['start_tuple'] (the last 30
    # predicted time pairs) seeds later prediction; evaluation['times'] holds the time points.

    (predictions, ) = tuple(
        ar.predict(
            input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
                evaluation, steps=250)))  # predict 250 further time pairs beyond the 1000 observed

    # plot
    plt.figure(figsize=(15, 5))
    plt.plot(data['times'].reshape(-1),
             data['values'].reshape(-1),
             label='origin')
    plt.plot(evaluation['times'].reshape(-1),
             evaluation['mean'].reshape(-1),
             label='evaluation')
    plt.plot(predictions['times'].reshape(-1),
             predictions['mean'].reshape(-1),
             label='prediction')
    plt.xlabel('time_step')
    plt.ylabel('values')
    plt.legend(loc=4)
    plt.savefig('predict_result.jpg')
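The comments above pin down the window bookkeeping; a trivial sanity check using this example's hyperparameters:

window_size = 40           # RandomWindowInputFn window length
input_window_size = 30     # ARRegressor input length
output_window_size = 10    # ARRegressor output length
assert input_window_size + output_window_size == window_size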
Example n. 7
def main():
    x = np.array(range(1000))
    noise = np.random.uniform(-0.2, 0.2, 1000)
    y = np.sin(np.pi * x / 50) + np.cos(np.pi * x / 50) +\
        np.sin(np.pi * x / 25) + np.exp(0.001 * x) + noise
    plt.plot(x, y)
    plt.show()

    data = {
        tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
        tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,
    }
    reader = NumpyReader(data)

    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(reader,
                                                               batch_size=5,
                                                               window_size=100)

    estimator = ts_estimators.TimeSeriesRegressor(
        model=_LSTMModel(num_features=1, num_units=128),
        optimizer=tf.train.AdamOptimizer(0.01))
    estimator.train(input_fn=train_input_fn, steps=2000)

    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)

    evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)

    (predictions, ) = tuple(
        estimator.predict(
            input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
                evaluation, steps=200)))

    observed_times = evaluation['times'][0]
    observed = evaluation['observed'][0, :, :]
    evaluated_times = evaluation['times'][0]
    evaluated = evaluation['mean'][0]
    predicted_times = predictions['times']
    predicted = predictions['mean']

    plt.figure(figsize=(15, 5))
    plt.axvline(999, linestyle="dotted", linewidth=4, color='r')
    observed_line = plt.plot(observed_times,
                             observed,
                             label='observation',
                             color='k')
    evaluated_line = plt.plot(evaluated_times,
                              evaluated,
                              label='evaluation',
                              color='g')
    predicted_line = plt.plot(predicted_times,
                              predicted,
                              label='prediction',
                              color='r')
    plt.legend(
        handles=[observed_line[0], evaluated_line[0], predicted_line[0]],
        loc='upper left')

    plt.savefig('lstm_single_var.jpg')
    plt.show()
Example n. 8
def main(_):
    x = np.array(range(1000))
    noise = np.random.uniform(-0.2, 0.2, 1000)
    y = np.sin(np.pi * x / 100) + x / 200. + noise
    plt.plot(x, y)
    plt.savefig('timeseries_xiao.pdf')

    data = {
        tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
        tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,
    }

    reader = NumpyReader(data)

    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(reader,
                                                               batch_size=16,
                                                               window_size=40)

    ar = tf.contrib.timeseries.ARRegressor(
        periodicities=200,
        input_window_size=30,
        output_window_size=10,
        num_features=1,
        loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)

    ar.train(input_fn=train_input_fn, steps=6000)

    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
    # keys of evaluation: ['covariance', 'loss', 'mean', 'observed', 'start_tuple', 'times', 'global_step']
    evaluation = ar.evaluate(input_fn=evaluation_input_fn, steps=1)

    (predictions, ) = tuple(
        ar.predict(input_fn=tf.contrib.timeseries.
                   predict_continuation_input_fn(evaluation, steps=250)))

    plt.figure(figsize=(15, 5))
    plt.plot(data['times'].reshape(-1),
             data['values'].reshape(-1),
             label='origin')
    plt.plot(evaluation['times'].reshape(-1),
             evaluation['mean'].reshape(-1),
             label='evaluation')
    plt.plot(predictions['times'].reshape(-1),
             predictions['mean'].reshape(-1),
             label='prediction')
    plt.xlabel('time_step')
    plt.ylabel('values')
    plt.legend(loc=4)
    plt.savefig('predict_result_xiao.pdf')
Example n. 9
    def predict(self, estimator, y):
        data = self._get_timeseries(y)
        reader = NumpyReader(data)
        evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
        evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
        # predict self.predict_time steps ahead
        (predictions,) = tuple(estimator.predict(
            input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
                evaluation, steps=self.predict_time)))

        # prediction times
        # predicted_times = predictions['times']
        predicted = predictions["mean"]
        outcome = []
        for i in range(len(predicted)):
            outcome.append(predicted[i][0])
        return outcome
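The extraction loop above keeps only the first feature of each predicted step; with NumPy indexing this collapses to one line. A minimal equivalent, assuming predicted is a 2-D array of shape (steps, num_features):

outcome = predicted[:, 0].tolist()  # first feature of every predicted step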
Example n. 10
def predict():
    kline_data = HuobiService.get_kline('btcusdt', '1min', 2000)
    kline = pd.DataFrame(kline_data['data'])
    closed = kline['close']
    closed = closed.values[::-1]
    ts = kline['id']

    price = np.array(closed)
    time = np.array(range(2000))

    data = {
        tf.contrib.timeseries.TrainEvalFeatures.TIMES: time,
        tf.contrib.timeseries.TrainEvalFeatures.VALUES: price,
    }

    reader = NumpyReader(data)

    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
        reader, batch_size=10, window_size=40)

    ar = tf.contrib.timeseries.ARRegressor(
        periodicities=100, input_window_size=30, output_window_size=10,
        num_features=1,
        loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)

    ar.train(input_fn=train_input_fn, steps=500)

    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
    # keys of evaluation: ['covariance', 'loss', 'mean', 'observed', 'start_tuple', 'times', 'global_step']
    evaluation = ar.evaluate(input_fn=evaluation_input_fn, steps=1)

    (predictions,) = tuple(ar.predict(
        input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
            evaluation, steps=1)))

    predictions = float(predictions['mean'].reshape(-1)[0])
    ret = dict()
    ret['prediction'] = float("{0:.2f}".format(predictions))
    ret['cur'] = float("{0:.2f}".format(float(closed[-1])))
    print(ret)
    return jsonify(ret)
Example n. 11
    def train(self, ori_data, x, y):
        data = self._get_timeseries(y)
        reader = NumpyReader(data)

        # the history must span more than three prediction horizons
        if len(y) > 3*self.predict_time:
            # tf.contrib.timeseries.RandomWindowInputFn randomly selects windows of length
            # window_size from the reader's data and packs them into batches of size batch_size.
            # In other words, a batch holds batch_size sequences, each of length window_size.
            train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
                reader, batch_size=4, window_size=3*self.predict_time)

            # Define the LSTM model: num_features=1 means a univariate time series;
            # num_units=n sets the size of the LSTM hidden state (the units that store past state).
            estimator = ts_estimators.TimeSeriesRegressor(
                model=LSTMModel(self.predict_time, num_features=1, num_units=3*self.predict_time),
                optimizer=tf.train.AdamOptimizer(0.001))
            # train for len(y) steps on the available history
            estimator.train(input_fn=train_input_fn, steps=len(y))
            return estimator
        else:
            print('few data')
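The _get_timeseries helper used by this train method (and by the predict method in Example n. 9) is not shown anywhere on this page. A hypothetical reconstruction of what it presumably builds, based on how NumpyReader is fed in the other examples:

    def _get_timeseries(self, y):
        # Hypothetical helper: wrap the series into the standard TFTS feature
        # dict (TIMES/VALUES), as every other example on this page does.
        values = np.asarray(y, dtype=np.float32)
        times = np.array(range(len(values)))
        return {
            tf.contrib.timeseries.TrainEvalFeatures.TIMES: times,
            tf.contrib.timeseries.TrainEvalFeatures.VALUES: values,
        }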
Example n. 12
    def predictor_LSTM(self, data, batch_size, window_size, num_features,
                       num_units, train_steps, predict_steps, learning_rate):
        reader = NumpyReader(data)

        train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
            reader, batch_size=batch_size, window_size=window_size)

        estimator = ts_estimators.TimeSeriesRegressor(
            model=LSTMModel(num_features=num_features, num_units=num_units),
            optimizer=tf.train.AdamOptimizer(learning_rate))

        estimator.train(input_fn=train_input_fn, steps=train_steps)
        evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
        evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)

        # Predict starting after the evaluation
        (predictions, ) = tuple(
            estimator.predict(
                input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
                    evaluation, steps=predict_steps)))
        print(self.Description)
        observed_times = evaluation["times"][0]
        observed = evaluation["observed"][0, :, :]
        evaluated_times = evaluation["times"][0]
        evaluated = evaluation["mean"][0]
        predicted_times = predictions['times']
        predicted = predictions["mean"]
        result = {}
        result["observed_times"] = observed_times
        result["observed"] = observed
        result["evaluated_times"] = evaluated_times
        result["evaluated"] = evaluated
        result["predicted_times"] = predicted_times
        result["predicted"] = predicted
        result["average_loss"] = evaluation['average_loss']
        result["loss"] = evaluation["loss"]

        return result
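A hypothetical call to the method above, with hyperparameters mirroring the other LSTM examples on this page; the object name predictor and its data dict are assumptions, not from the original:

result = predictor.predictor_LSTM(
    data=data,              # TFTS feature dict, built as in the other examples
    batch_size=4,
    window_size=100,
    num_features=1,
    num_units=128,
    train_steps=2000,
    predict_steps=200,
    learning_rate=0.001)
print(result['average_loss'], result['loss'])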
Example n. 13
  def _exogenous_input_step(
          self, current_times, current_exogenous_regressors, state):
    """Update model state based on exogenous regressors."""
    raise NotImplementedError(
        "Exogenous inputs are not implemented for this example.")


if __name__ == '__main__':
  tf.logging.set_verbosity(tf.logging.INFO)
  x = np.array(range(1000))
  noise = np.random.uniform(-0.2, 0.2, 1000)
  y = np.sin(np.pi * x / 50 ) + np.cos(np.pi * x / 50) + np.sin(np.pi * x / 25) + noise

  data = {
      tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
      tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,
  }

  reader = NumpyReader(data)

  train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
      reader, batch_size=4, window_size=100)

  estimator = ts_estimators.TimeSeriesRegressor(
      model=_LSTMModel(num_features=1, num_units=128),
      optimizer=tf.train.AdamOptimizer(0.001))

  estimator.train(input_fn=train_input_fn, steps=2000)
  evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
  evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  # Predict starting after the evaluation
  (predictions,) = tuple(estimator.predict(
      input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
          evaluation, steps=200)))
Example n. 14
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import NumpyReader

x = np.array(range(1000))
noise = np.random.uniform(-0.2, 0.2, 1000)
y = np.sin(np.pi * x / 100) + x / 200. + noise
plt.plot(x, y)
plt.savefig('timeseries_y.jpg')

data = {
    tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
    tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,
}

reader = NumpyReader(data)
with tf.Session() as sess:
    full_data = reader.read_full()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print(sess.run(full_data))
    coord.request_stop()

train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(reader,
                                                           batch_size=2,
                                                           window_size=10)

with tf.Session() as sess:
    batch_data = train_input_fn.create_batch()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
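    # Mirroring the parallel snippet in Example n. 19, the batch would then be
    # fetched inside this session:
    one_batch = sess.run(batch_data[0])
    coord.request_stop()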
Example n. 15
index_front = 0  # initialize the segment start, which the loop below reads before reassigning
index_back = 1
while index_back < len(y_train_set):
    if y_train_set[index_back] - y_train_set[index_back - 1] > 100:
        y_train = y_train_set[index_front:index_back]
        x_time = 1
        X_time = np.zeros(len(y_train))
        for i in range(len(y_train)):
            X_time[i] = x_time
            x_time = x_time + 1

        data = {
            tf.contrib.timeseries.TrainEvalFeatures.TIMES: X_time,
            tf.contrib.timeseries.TrainEvalFeatures.VALUES: y_train,
        }

        reader = NumpyReader(data)

        train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
            reader, batch_size=16, window_size=50)

        estimator = ts_estimators.TimeSeriesRegressor(
            model=_LSTMModel(num_features=1, num_units=32),
            optimizer=tf.train.AdamOptimizer(0.001),
            model_dir=model_name_lstm)

        estimator.train(input_fn=train_input_fn, steps=100)

        index_front = index_back

    index_back += 1
Example n. 16
def main(_):
    x = np.array(range(1000))
    noise = np.random.uniform(-0.2, 0.2, 1000)
    y = np.sin(np.pi * x / 100) + x / 200. + noise
    print(x, y)
    # plt.plot(x, y)
    # plt.savefig('timeseries_y.jpg')

    data = {  # synthetic data dict (immediately overwritten by the CSV-based dict below)
        tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
        tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,
    }

    df = pd.read_csv(r'C:\Users\BBD\Desktop\test\tmp\Cshl0V1SGLeAb6opAABKW103UTU063.csv')
    data = {
        tf.contrib.timeseries.TrainEvalFeatures.TIMES: df['商场ID'].values,  # mall ID column
        tf.contrib.timeseries.TrainEvalFeatures.VALUES: df['单位面积营业额'].values,  # revenue per unit area
    }

    reader = NumpyReader(data)

    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
        reader, batch_size=16, window_size=40)

    ar = tf.contrib.timeseries.ARRegressor(
        periodicities=200, input_window_size=30, output_window_size=10,
        num_features=1,
        loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)

    ar.train(input_fn=train_input_fn, steps=6000)

    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
    # keys of evaluation: ['covariance', 'loss', 'mean', 'observed', 'start_tuple', 'times', 'global_step']
    evaluation = ar.evaluate(input_fn=evaluation_input_fn, steps=1)

    pre_data = ar.predict(
        input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
            evaluation, steps=250))
    print('xxxxxxxxxxxxxxxxxx')
    print(pre_data)
    (predictions,) = tuple(pre_data)
    print('xxxxxxxxxxxxxxxxxxxxxxxxx')
    print(predictions)
    print(type(predictions))
    pre_df = None
    for key in predictions:
        data = predictions[key].reshape(-1)
        print(data)
        data = np.nan_to_num(data)
        print(data)
        df = to_output(data, key)
        pre_df = pd.concat([pre_df, df], axis=1) if pre_df is not None else df
        print(df)
        print(key + '---< ' +str(predictions[key]))
        print(key + '&&&---> ' + str(predictions[key].reshape(-1)))

    print(pre_df)
    pre_df.to_csv(r'C:\Users\BBD\Desktop\test\tmp\12345.csv')
    data_dis = dataDistributionLocal(pre_df)
    print('3333333333333333')
    print(data_dis)
Example n. 17
def tensor_flow(net_list, zhibiao_in):
    predict_result = []
    for net in net_list:
        y = np.array(zhibiao_in[zhibiao_in.cellname == net].iloc[:, 5:10])
        x = np.array(range(len(y)))
        #print(y)
        #print(x)
        data = {
            tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
            tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,
        }
        print("net:", net)
        reader = NumpyReader(data)

        train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
            reader, batch_size=10, window_size=48)
        # periodicities=48 is the period; input_window_size=24 and output_window_size=24
        # (input plus output) add up to window_size=48
        # num_features is the dimensionality of the value observed at a time point: 5 here
        ar = tf.contrib.timeseries.ARRegressor(
            periodicities=48,
            input_window_size=24,
            output_window_size=24,
            num_features=5,
            loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)

        ar.train(input_fn=train_input_fn, steps=500)
        evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
        # keys of evaluation: ['covariance', 'loss' (loss value), 'mean' (predictions), 'observed', 'start_tuple', 'times' (time points for mean), 'global_step']
        evaluation = ar.evaluate(input_fn=evaluation_input_fn, steps=1)
        # predict the next 24 hours
        (predictions, ) = tuple(
            ar.predict(input_fn=tf.contrib.timeseries.
                       predict_continuation_input_fn(evaluation, steps=24)))
        prediction_re = predictions['mean']
        #prediction_time = predictions['times']
        predict_result.append(prediction_re)

        plt.figure(figsize=(15, 5))

        plt.plot(data['times'].reshape(-1),
                 data['values'].reshape(-1)[:].tolist()[::5],
                 label='origin-UE',
                 color="red")
        #plt.plot(data['times'].reshape(-1), data['values'].reshape(-1)[:].tolist()[1::5], label='origin-erab', color="yellow")
        plt.plot(evaluation['times'].reshape(-1),
                 evaluation['mean'].reshape(-1)[:].tolist()[::5],
                 label='evaluation')
        plt.plot(predictions['times'].reshape(-1),
                 predictions['mean'].reshape(-1)[:].tolist()[::5],
                 label='prediction')
        plt.xlabel('time_step')
        plt.ylabel('values')
        plt.legend(loc=4)
        plt.show()
        plt.savefig('predict_result-for%s.jpg' % net)

        #print(prediction_re)
        '''
        [rows, cols] = prediction_re.shape
        for i in range(rows - 1):
            for j in range(cols - 1):
                print(prediction_re[j, i])
        
        a= prediction_re.reshape(-1)

        print(a[:].tolist())
        print("完成")
        '''
    return predict_result
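The reshape(-1)[:].tolist()[::5] idiom above flattens the (1, T, 5) evaluation array and strides by 5 to recover the first of the five features; indexing the feature axis directly is clearer. A minimal equivalent, assuming the shapes these examples produce:

ue_evaluated = evaluation['mean'][0, :, 0]    # feature 0 ('UE') across all evaluated steps
ue_predicted = predictions['mean'][:, 0]      # predictions carry no leading batch dimension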
Example n. 18
def main(_):
    x = np.array(range(1000))
    noise = np.random.uniform(-0.2, 0.2, 1000)
    y = np.sin(np.pi * x / 100) + x / 200.
    plt.plot(x, y)
    plt.savefig('timeseries_y.jpg')

    data = {
        tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
        tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,
    }

    reader = NumpyReader(data)
    """
        tf.contrib.timeseries.RandomWindowInputFn会在reader的所有数据中,
        随机选取窗口长度为window_size的序列,并包装成batch_size大小的batch数据。
        换句话说,一个batch内共有batch_size个序列,每个序列的长度为window_size
    """
    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(reader,
                                                               batch_size=16,
                                                               window_size=40)
    """
        ARRegressor:
        第一个参数periodicities表示序列的规律性周期。
        我们在定义数据时使用的语句是:“y = np.sin(np.pi * x / 100) + x / 200. + noise”,
        因此周期为200。
        input_window_size表示模型每次输入的值,
        output_window_size表示模型每次输出的值。
        input_window_size和output_window_size加起来必须等于train_input_fn中总的window_size。
        在这里,我们总的window_size为40,input_window_size为30,
        output_window_size为10,也就是说,一个batch内每个序列的长度为40,
        其中前30个数被当作模型的输入值,后面10个数为这些输入对应的目标输出值。
        最后一个参数loss指定采取哪一种损失,一共有两
        种损失可以选择,分别是NORMAL_LIKELIHOOD_LOSS和SQUARED_LOSS。

        num_features参数表示在一个时间点上观察到的数的维度。我们这里每一步都是一个单独的值,所以num_features=1。

        还有一个比较重要的参数是model_dir。它表示模型训练好后保存的地址,如果不指定的话,就会随机分配一个临时地址。
    """
    ar = tf.contrib.timeseries.ARRegressor(
        periodicities=200,
        input_window_size=30,
        output_window_size=10,
        num_features=1,
        loss=tf.contrib.timeseries.ARModel.SQUARED_LOSS)

    ar.train(input_fn=train_input_fn, steps=6000)
    """
        TFTS中验证(evaluation)的含义是:使用训练好的模型在原先的训练集上进行计算,由此我们可以观察到模型的拟合效果
        如果要理解这里的逻辑,首先要理解之前定义的AR模型:
        它每次都接收一个长度为30的输入观测序列,并输出长度为10的预测序列。
        整个训练集是一个长度为1000的序列,前30个数首先被当作“初始观测序列”输入到模型中,由此就可以计算出下面10步的预测值。
        接着又会取30个数进行预测,这30个数中有10个数就是前一步的预测值,新得到的预测值又会变成下一步的输入,以此类推。

        最终我们得到970个预测值(970=1000-30,因为前30个数是没办法进行预测的)。
        这970个预测值就被记录在evaluation[‘mean’]中。evaluation还有其他几个键值,
        如evaluation[‘loss’]表示总的损失,evaluation[‘times’]表示evaluation[‘mean’]对应的时间点等等。
    """
    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
    # keys of evaluation: ['covariance', 'loss', 'mean', 'observed',
    # 'start_tuple', 'times', 'global_step']
    evaluation = ar.evaluate(input_fn=evaluation_input_fn, steps=1)
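    # Expected shapes, following the explanation above (stated here as comments,
    # not asserted in the original): evaluation['mean'] -> (1, 970, 1) and
    # evaluation['times'] -> (1, 970), since 1000 observations minus the
    # 30-step initial input window leaves 970 fitted points.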

    (predictions, ) = tuple(
        ar.predict(input_fn=tf.contrib.timeseries.
                   predict_continuation_input_fn(evaluation, steps=250)))

    print('loss', evaluation['loss'])

    plt.figure(figsize=(15, 5))
    plt.plot(data['times'].reshape(-1),
             data['values'].reshape(-1),
             label='origin')
    plt.plot(evaluation['times'].reshape(-1),
             evaluation['mean'].reshape(-1),
             label='evaluation')
    plt.plot(predictions['times'].reshape(-1),
             predictions['mean'].reshape(-1),
             label='prediction')

    plt.xlabel('time_step')
    plt.ylabel('values')
    plt.legend(loc=4)
    plt.savefig('predict_result.jpg')
Example n. 19
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import NumpyReader, CSVReader
# Next, use np.sin to generate an experimental time series: a sine curve with an
# upward trend and some random noise added.
x = np.array(range(1000))
noise = np.random.uniform(-0.2, 0.2, 1000)
y = np.sin(np.pi * x / 100) + x / 200 + noise
plt.plot(x, y)
plt.savefig('timeseries_y.jpg')
data = {  # read in as numpy data
    tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
    tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,
}
reader = NumpyReader(data)
with tf.Session() as sess:
    full_data = reader.read_full()  # returns the tensor for the time series
    # Calling read_full() creates a read queue; the queue must be started with
    # tf.train.start_queue_runners before values can be fetched.
    coord=tf.train.Coordinator()
    threads=tf.train.start_queue_runners(sess=sess, coord=coord)
    print(sess.run(full_data))
    coord.request_stop()
# build the batched dataset
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(reader, batch_size=2,
                                                           window_size=10)
with tf.Session() as sess:
    batch_data=train_input_fn.create_batch()
    coord=tf.train.Coordinator()
    threads=tf.train.start_queue_runners(sess=sess, coord=coord)
    one_batch = sess.run(batch_data[0])
Example n. 20
def wanzhengxing(da_input):
    # completeness check: find cells (net_num) with fewer than 24*days hourly records
    date_haha = date_list(da_input)
    dates_list = list(set(list(date_haha)))
    days = len(dates_list)
    date_numbs = 24*days
    net_num_list = list(set(list(da_input['net_num'])))
    net_num_list_input = list(da_input['net_num'])
    
    date_lost_list = list()
    
    for net_item in net_num_list:
        net_num_counter = net_num_list_input.count(net_item)
        if net_num_counter < date_numbs:
            date_lost_list.append(net_item)
    
    for item in date_lost_list:
        pass  # per-item handling was left unfinished in the original

    return date_lost_list




       
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
  """A time series model-building example using an RNNCell."""

  def __init__(self, num_units, num_features, dtype=tf.float32):
    """Initialize/configure the model object.
    Note that we do not start graph building here. Rather, this object is a
    configurable factory for TensorFlow graphs which are run by an Estimator.
    Args:
      num_units: The number of units in the model's LSTMCell.
      num_features: The dimensionality of the time series (features per
        timestep).
      dtype: The floating point data type to use.
    """
    super(_LSTMModel, self).__init__(
        # Pre-register the metrics we'll be outputting (just a mean here).
        train_output_names=["mean"],
        predict_output_names=["mean"],
        num_features=num_features,
        dtype=dtype)
    self._num_units = num_units
    # Filled in by initialize_graph()
    self._lstm_cell = None
    self._lstm_cell_run = None
    self._predict_from_lstm_output = None

  def initialize_graph(self, input_statistics):
    """Save templates for components, which can then be used repeatedly.
    This method is called every time a new graph is created. It's safe to start
    adding ops to the current default graph here, but the graph should be
    constructed from scratch.
    Args:
      input_statistics: A math_utils.InputStatistics object.
    """
    super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
    self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
    # Create templates so we don't have to worry about variable reuse.
    self._lstm_cell_run = tf.make_template(
        name_="lstm_cell",
        func_=self._lstm_cell,
        create_scope_now_=True)
    # Transforms LSTM output into mean predictions.
    self._predict_from_lstm_output = tf.make_template(
        name_="predict_from_lstm_output",
        func_=lambda inputs: tf.layers.dense(inputs=inputs, units=self.num_features),
        create_scope_now_=True)

  def get_start_state(self):
    """Return initial state for the time series model."""
    return (
        # Keeps track of the time associated with this state for error checking.
        tf.zeros([], dtype=tf.int64),
        # The previous observation or prediction.
        tf.zeros([self.num_features], dtype=self.dtype),
        # The state of the RNNCell (batch dimension removed since this parent
        # class will broadcast).
        [tf.squeeze(state_element, axis=0)
         for state_element
         in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])

  def _transform(self, data):
    """Normalize data based on input statistics to encourage stable training."""
    mean, variance = self._input_statistics.overall_feature_moments
    return (data - mean) / variance

  def _de_transform(self, data):
    """Transform data back to the input scale."""
    mean, variance = self._input_statistics.overall_feature_moments
    return data * variance + mean

  def _filtering_step(self, current_times, current_values, state, predictions):
    """Update model state based on observations.
    Note that we don't do much here aside from computing a loss. In this case
    it's easier to update the RNN state in _prediction_step, since that covers
    running the RNN both on observations (from this method) and our own
    predictions. This distinction can be important for probabilistic models,
    where repeatedly predicting without filtering should lead to low-confidence
    predictions.
    Args:
      current_times: A [batch size] integer Tensor.
      current_values: A [batch size, self.num_features] floating point Tensor
        with new observations.
      state: The model's state tuple.
      predictions: The output of the previous `_prediction_step`.
    Returns:
      A tuple of new state and a predictions dictionary updated to include a
      loss (note that we could also return other measures of goodness of fit,
      although only "loss" will be optimized).
    """
    state_from_time, prediction, lstm_state = state
    with tf.control_dependencies(
            [tf.assert_equal(current_times, state_from_time)]):
      transformed_values = self._transform(current_values)
      # Use mean squared error across features for the loss.
      predictions["loss"] = tf.reduce_mean(
          (prediction - transformed_values) ** 2, axis=-1)
      # Keep track of the new observation in model state. It won't be run
      # through the LSTM until the next _imputation_step.
      new_state_tuple = (current_times, transformed_values, lstm_state)
    return (new_state_tuple, predictions)

  def _prediction_step(self, current_times, state):
    """Advance the RNN state using a previous observation or prediction."""
    _, previous_observation_or_prediction, lstm_state = state
    lstm_output, new_lstm_state = self._lstm_cell_run(
        inputs=previous_observation_or_prediction, state=lstm_state)
    next_prediction = self._predict_from_lstm_output(lstm_output)
    new_state_tuple = (current_times, next_prediction, new_lstm_state)
    return new_state_tuple, {"mean": self._de_transform(next_prediction)}

  def _imputation_step(self, current_times, state):
    """Advance model state across a gap."""
    # Does not do anything special if we're jumping across a gap. More advanced
    # models, especially probabilistic ones, would want a special case that
    # depends on the gap size.
    return state

  def _exogenous_input_step(
          self, current_times, current_exogenous_regressors, state):
    """Update model state based on exogenous regressors."""
    raise NotImplementedError(
        "Exogenous inputs are not implemented for this example.")


def ratio_mark(data_input, standard):
    # For values below `standard`, divmod gives quotient 0 and the mark becomes
    # value/standard; marks that come out as exactly 0 are replaced with 1 below.
    div_mod = divmod(data_input, standard)
    ratio_mark_out = abs(div_mod[0]-1)*div_mod[1]/standard
    ratio_mark_out = ratio_mark_out.replace(0, 1)
    return ratio_mark_out
    
    


def not_ok(data_input, lastday_input, data_input_ratio, net_item, flow_standard, alert_standard):
    
    ratio_need = data_input_ratio[['UE', 'erab', 'handover', 'rrc']]
    ratio_need_columns = ratio_need.columns
    ratio_out = DataFrame()
    for columns_need in ratio_need_columns:
        ratio_item = ratio_need[columns_need]
        ratio_isok = list(map(lambda y:0  if y>alert_standard else 1, ratio_item))
        ratio_out[columns_need] = ratio_isok
        
#    rows_input = ratio_need.iloc[:,0].size
#    columns_input = ratio_need.columns.size
#    for i in range(rows_input):
#        for j in range(columns_input):
#            value_need = ratio_need.iloc[[i]].values[0][j]
#            if value_need > 0.3 :
#                isok = 0
#            else:
#                isok = 1
#            ratio_need.iloc[[i]].values[0][j] = isok
                
    flow_ratio = data_input_ratio['flow']
    flow_ratio.index = range(len(flow_ratio))
    flow_data_pred = data_input['flow']    
    flow_data_pred.index = range(len(flow_data_pred))  
    last_flow = lastday_input['flow']
    
    
    flow_mark = ratio_mark(flow_data_pred, flow_standard)  # above 3 GB, the ratio conversion is not applied
    flow_ratio_out = flow_ratio*flow_mark
    
    flow_isok = list(map(lambda y:0  if y>alert_standard else 1, flow_ratio_out))
    
    ratio_out['flow'] = flow_isok
    
    # plot
    notok_hours = 24 - ratio_out.sum(axis=0)
    if notok_hours['flow'] > 3:
        fig = plt.figure(figsize=(6, 3))
        ax = fig.add_subplot(111)
        ax.plot(range(24), flow_data_pred, label="flow_pred", color="g")
        ax.plot(range(24), last_flow, label="flow_lastday", color="r")
        ax.set_ylabel('GB')
        ax.set_xlabel('Hour')
        plt.legend(loc="upper left")
        plt.title('net_num: ' + net_item + '  flow lastday')
        #plt.show()
        plt.savefig('F:/work/tianhe4location/output/picture/' + net_item +'.jpg')    
    
    return ratio_out
    


if __name__ == '__main__':
  tf.logging.set_verbosity(tf.logging.INFO)
  # check whether the current day's metrics are anomalous
  predicted_zhibiao = pd.read_csv('F:/work/tianhe4location/output/predicted_last.csv')
  predicted_zhibiao.columns = ['hour', 'UE', 'erab', 'flow', 'handover', 'rrc', 'net_num']
  zhibiao_lastday = pd.read_csv("F:/work/tianhe4location/20180521.csv")
  
  zhibiao_last_day = zhibiao_lastday[['hour', 'UE' , 'erab', 'flow', 'handover', 'rrc', 'net_num']] 
  net_num_list = list(set(list(predicted_zhibiao['net_num'])))
  
  bijiao_result = DataFrame()
  
  for net_item in net_num_list:
      pred_zhibiao = predicted_zhibiao[predicted_zhibiao.net_num == net_item][
              ['hour', 'UE' , 'erab', 'flow', 'handover', 'rrc']]
      last_zhibiao = zhibiao_last_day[zhibiao_last_day.net_num == net_item][[
              'hour', 'UE' , 'erab', 'flow', 'handover', 'rrc']]
      #bijiao = DataFrame(np.array(pred_zhibiao) - np.array(last_zhibiao), 
                         #columns=['hour', 'UE' , 'erab', 'flow', 'handover', 'rrc'])
      bijiao = np.array(pred_zhibiao) - np.array(last_zhibiao)                                         
      bijiao_ratio = np.array(bijiao)/np.array(pred_zhibiao)
      
      bijiao_da = DataFrame(bijiao, columns=['hour', 'UE' , 'erab', 
                         'flow', 'handover', 'rrc'])
      bijiao_ratio_da = DataFrame(bijiao_ratio, columns=['hour', 'UE' , 'erab', 
                   'flow', 'handover', 'rrc'])
           
      bijiao_notok = not_ok(pred_zhibiao, last_zhibiao, bijiao_ratio_da, net_item, 5, 0.5)  # 5 is the flow baseline for the ratio conversion, 0.5 the alert threshold
      bijiao_notok['net_num'] = net_item
      bijiao_result = bijiao_result.append(bijiao_notok) 
      
  #bijiao_result.to_csv("F:/work/tianhe4location/output/isok.csv")   
  
  
  # determine whether the day is a workday or a public holiday
  workday_calender = pd.read_csv("F:/work/tianhe4location/basic_data/2018workday.csv")
  workday_2018 = str2time(list(workday_calender['date']))   
  zhibiao_oneday = pd.read_csv("F:/work/tianhe4location/20180521.csv")
  data = date_list(zhibiao_oneday)
  is_workday = isworkday(data)
  
  if is_workday:
      zhibiao = pd.read_csv("F:/work/tianhe4location/workday.csv")
      zhibiao1 = zhibiao.append(zhibiao_oneday)
      my_file = "F:/work/tianhe4location/workday.csv"
      os.remove(my_file)
      new_index = range(len(zhibiao1))
      zhibiao1.index = new_index   
      zhibiao1.to_csv("F:/work/tianhe4location/workday.csv")
  else:
      zhibiao = pd.read_csv("F:/work/tianhe4location/holiday.csv")
      zhibiao1 = zhibiao.append(zhibiao_oneday)
      my_file = "F:/work/tianhe4location/holiday.csv"
      os.remove(my_file)
      new_index = range(len(zhibiao1))
      zhibiao1.index = new_index   
      zhibiao1.to_csv("F:/work/tianhe4location/holiday.csv")
      
  
  
    #date_input = 
  date_index = date_list(zhibiao)
  zhibiao.index = date_index    

  #zhibiao1.to_csv("F:/work/tianhe4location/test/wori.csv")    
  date_index_list = sorted(list(set(list(date_index))), reverse=True)
  date_len = len(date_index_list)   
  if date_len <= 30:
      zhibiao_need_index = date_index_list
  else:
      zhibiao_need_index = date_index_list[:30]
          
  zhibiao_need = zhibiao.ix[zhibiao_need_index]
  zhibiao_need = zhibiao_need.sort_index()
    
  net_name = set(list(zhibiao_need.net_num))
  prediction_cell = list()
  predicted_last = pd.DataFrame(columns=['UE', 'erab', 'flow', 'handover', 'rrc', 'net_num'])
  
  for cell in net_name: 
      #reader_numpy = numpy.array(zhibiao[zhibiao.net_num =='GK477'][['UE', 'erab',
                                 #'flow', 'handover', 'rrc']])
    
      y = np.array(zhibiao_need[zhibiao_need.net_num == cell][['UE', 'erab',
                                 'flow', 'handover', 'rrc']])
      x = np.array(range(len(y)))
      data = {tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
              tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,}
      reader = NumpyReader(data)
    
    
    
      #reader = tf.contrib.timeseries.CSVReader(
              #csv_file_name,
              #column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
                    #+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 1))
      train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
              reader, batch_size=20, window_size=168)

      estimator = ts_estimators.TimeSeriesRegressor(
              model=_LSTMModel(num_features=5, num_units=96),
              optimizer=tf.train.AdamOptimizer(0.001))

      estimator.train(input_fn=train_input_fn, steps=200)
      evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
      evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  # Predict starting after the evaluation
      (predictions,) = tuple(estimator.predict(
              input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
                      evaluation, steps=24)))

      observed_times = evaluation["times"][0]
      observed = evaluation["observed"][0, :, :]
      evaluated_times = evaluation["times"][0]
      evaluated = evaluation["mean"][0]
      predicted_times = predictions['times']
      predicted = predictions["mean"]
      
      predicted_out = DataFrame(predicted)
      predicted_out.columns = ['UE', 'erab', 'flow', 'handover', 'rrc']
      predicted_out['net_num'] = cell
      predicted_last = predicted_last.append(predicted_out)
      
  my_pred_file = 'F:/work/tianhe4location/output/predicted_last.csv'
  os.remove(my_pred_file)
  predicted_last.to_csv('F:/work/tianhe4location/output/predicted_last.csv')
Example n. 21
def main(_):
    data = np.zeros((3, 1680), dtype=np.float32)
    all_data = np.load('../dataset/data.npy')
    pre = np.zeros((3, 168), dtype=np.float32)
    print(os.listdir('../dataset/'))
    anomaly = np.load('../dataset/anomaly_all.npy')

    val_split = round(0.9 * all_data.shape[1])
    test_split = round(0.1 * all_data.shape[1])
    X_train = all_data[:, :val_split]
    X_test = all_data[:, -test_split:]
    X_test_label = anomaly[:, -test_split:].flatten()
    print(val_split, test_split, X_test_label.shape)

    x = np.array(range(val_split))
    plt.figure(figsize=(16, 8))
    ts = ['TS6', 'TS16', 'TS34']
    for i in range(3):
        x_train = X_train[i]
        data = {
            tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
            tf.contrib.timeseries.TrainEvalFeatures.VALUES: x_train,
        }

        reader = NumpyReader(data)
        train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
            reader, batch_size=16, window_size=40)

        ar = tf.contrib.timeseries.ARRegressor(
            periodicities=24,
            input_window_size=30,
            output_window_size=10,
            num_features=1,
            loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)

        ar.train(input_fn=train_input_fn, steps=6000)

        evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
        # keys of evaluation: ['covariance', 'loss', 'mean', 'observed', 'start_tuple', 'times', 'global_step']
        evaluation = ar.evaluate(input_fn=evaluation_input_fn, steps=1)

        (predictions, ) = tuple(
            ar.predict(
                input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
                    evaluation, steps=test_split)))
        pre[i] = predictions['mean'].reshape(-1)
        ax = plt.subplot(311 + i)
        plt.xticks(fontsize=20)
        plt.yticks(fontsize=20)
        plt.plot(data['times'].reshape(-1),
                 data['values'].reshape(-1),
                 color='orangered',
                 lw=2,
                 label='Original Data')
        #plt.plot(evaluation['times'].reshape(-1), evaluation['mean'].reshape(-1),  linewidth=1.0, linestyle=':', color = 'sandybrown',marker='*',label='evaluation')
        plt.plot(predictions['times'].reshape(-1),
                 predictions['mean'].reshape(-1),
                 color='orangered',
                 linestyle='--',
                 lw=2,
                 label='Prediction Data')
        if i == 2:
            ax.set_xlabel('TIMESTEMPS', fontsize=20)
        ax.set_ylabel(ts[i], fontsize=20)
        ax.legend(loc=2, fontsize=10)

    plt.savefig('ar.pdf')
    plt.show()
    """
Example n. 22
    tf.logging.set_verbosity(tf.logging.INFO)
    x, y = get_rnn_data(18000, "15T")
    x_train, y_train = x[:900], y[:900]  # first 900 data points for training
    x_eval, y_eval = x[900:], y[900:]  # last 300 data points for evaluation

    data_train = {
        tf.contrib.timeseries.TrainEvalFeatures.TIMES: x_train,
        tf.contrib.timeseries.TrainEvalFeatures.VALUES: y_train,
    }

    data_eval = {
        tf.contrib.timeseries.TrainEvalFeatures.TIMES: x_eval,
        tf.contrib.timeseries.TrainEvalFeatures.VALUES: y_eval,
    }

    reader = NumpyReader(data_train)
    reader_eval = NumpyReader(data_eval)

    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(reader,
                                                               batch_size=4,
                                                               window_size=100)

    estimator = ts_estimators.TimeSeriesRegressor(
        model=_LSTMModel(num_features=1, num_units=128),
        optimizer=tf.train.AdamOptimizer(0.001))

    estimator.train(input_fn=train_input_fn, steps=2000)
    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(
        reader_eval)
    evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
    # Predict starting after the evaluation
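    # (a continuation consistent with the other examples on this page; the
    # steps value below is illustrative, not from the original snippet)
    (predictions,) = tuple(estimator.predict(
        input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
            evaluation, steps=300)))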
Example n. 23
def train_and_predict_timeseries_lstm(data,
                                      window_size=100,
                                      num_features=1,
                                      num_units=128,
                                      train_steps=1000,
                                      batch_size=4):
    '''
  function created by txy based on the __main__ function in this file.
  data is a dict with keys "time" and "values" 
  '''
    tf.logging.set_verbosity(tf.logging.INFO)

    reader = NumpyReader(data)

    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
        reader, batch_size=batch_size, window_size=window_size
    )  # window_size is the total length of each series segment, = input_length + output_length

    # model
    estimator = ts_estimators.TimeSeriesRegressor(
        model=_LSTMModel(num_features=num_features, num_units=num_units),
        optimizer=tf.train.AdamOptimizer(0.001),
        model_dir="./output_model")

    # training
    # What are the input_length and output_length within the window_size this model uses?
    estimator.train(input_fn=train_input_fn, steps=train_steps)
    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
    evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)

    # Predict starting after the evaluation
    # i.e., starting from the end of the data above, predict the next `steps` values
    (predictions, ) = tuple(
        estimator.predict(
            input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
                evaluation, steps=200)))

    # plot the results
    observed_times = evaluation["times"][0]
    observed = evaluation["observed"][0, :, :]
    evaluated_times = evaluation["times"][0]
    evaluated = evaluation["mean"][0]
    predicted_times = predictions['times']
    predicted = predictions["mean"]

    plt.figure(figsize=(15, 5))
    plt.axvline(999, linestyle="dotted", linewidth=4, color='r')
    observed_lines = plt.plot(observed_times,
                              observed,
                              label="observation",
                              color="k")
    evaluated_lines = plt.plot(evaluated_times,
                               evaluated,
                               label="evaluation",
                               color="g")
    predicted_lines = plt.plot(predicted_times,
                               predicted,
                               label="prediction",
                               color="r")
    plt.legend(
        handles=[observed_lines[0], evaluated_lines[0], predicted_lines[0]],
        loc="upper left")
    plt.savefig('predict_result.png')
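A minimal usage sketch for the helper above, assuming the synthetic series used throughout this page and the common preamble noted under Example n. 1:

x = np.array(range(1000))
noise = np.random.uniform(-0.2, 0.2, 1000)
y = np.sin(np.pi * x / 100) + x / 200. + noise
data = {
    tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
    tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,
}
train_and_predict_timeseries_lstm(data, window_size=100, num_features=1,
                                  num_units=128, train_steps=1000, batch_size=4)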
Example n. 24
# 1. Define the observation times x and the observed values y,
#    where y is a sine curve plus noise
x = np.array(range(1000))
noise = np.random.uniform(-0.2, 0.2, 1000)
y = np.sin(np.pi * x / 100) + x / 200. + noise
plt.plot(x, y)
plt.savefig('1_timeseries_y.jpg')

# 2. Store x and y in the data dict
data = {
    tf.contrib.timeseries.TrainEvalFeatures.TIMES: x,
    tf.contrib.timeseries.TrainEvalFeatures.VALUES: y,
}

# 3. Read it into a reader with NumpyReader
reader = NumpyReader(data)

# 4. Actually read the reader's values
with tf.Session() as sess:
    full_data = reader.read_full()  # read_full() returns the tensor for the time series;
    # the queue must be started with start_queue_runners before run() can fetch values
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print(sess.run(full_data))
    coord.request_stop()

# 5. Build the object that reads batch data
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
    reader, batch_size=2, window_size=10)  # a batch holds 2 sequences, each of length 10

# 6. Actually read and create the batch data

with tf.Session() as sess:
    batch_data = train_input_fn.create_batch()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
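    # As in Example n. 19, fetch one concrete batch from the queue:
    one_batch = sess.run(batch_data[0])
    coord.request_stop()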