Exemplo n.º 1
0
def main():
    """Train and evaluate the LSTM regressor on full-duplex complex sequences.

    For each setting j, every complex sample is represented as a
    [magnitude, atan(imag/real)] pair; the model is fitted per sequence, the
    held-out window is predicted, and the resulting dB error vector is stored
    to `result/<j>DB.mat`.
    """
    # Deep recurrent model wrapped for the sklearn-style fit/predict API.
    regressor_abs = SKCompat(learn.Estimator(model_fn=lstm_model, model_dir="Models/model_2"))
    # Load the complex-valued dataset.
    matfn = 'result/full-duplex.mat'
    data = sio.loadmat(matfn)

    for j in [0, 5, 10, 15, 20, 25, 30, 34]:
        x = data['x' + str(j) + 'b']
        y = data['y' + str(j) + 'b']
        Error = []
        for seqnum in range(100):
            print("", seqnum)
            # Encode each complex sample as [magnitude, atan(imag/real)].
            complex_x = [[abs(v), math.atan(v.imag / v.real)] for v in x[seqnum]]
            complex_y = [[abs(v), math.atan(v.imag / v.real)] for v in y[seqnum]]

            # Split into training / testing windows.
            train_X_abs = generate_data(complex_x[0:TRAINING_EXAMPLES])
            train_y_abs = complex_y[TIMESTEPS - 1:TRAINING_EXAMPLES]
            test_X_abs = generate_data(complex_x[TRAINING_EXAMPLES:TESTING_EXAMPLES + TRAINING_EXAMPLES - 1])
            test_y_abs = complex_y[TRAINING_EXAMPLES:TESTING_EXAMPLES + TRAINING_EXAMPLES - 1]

            # Train, then predict the held-out portion.
            regressor_abs.fit(train_X_abs, train_y_abs, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
            predicted_abs = [[pred] for pred in regressor_abs.predict(test_X_abs)]

            # Rebuild I/Q components from the [magnitude, angle] pairs.
            # NOTE(review): atan is applied again to the stored angle here,
            # mirroring the original computation.
            a_real, b_real, a_pre, b_pre = [], [], [], []
            for idx in range(len(test_y_abs)):
                mag, ang = test_y_abs[idx][0], test_y_abs[idx][1]
                a_real.append(mag * math.cos(math.atan(ang)))
                b_real.append(mag * math.sin(math.atan(ang)))
                p = np.array(predicted_abs[idx])[0]
                a_pre.append(p[0] * math.cos(math.atan(p[1])))
                b_pre.append(p[0] * math.sin(math.atan(p[1])))
            diff_real = [a_real[k] - a_pre[k] for k in range(len(a_real))]
            diff_imag = [b_real[k] - b_pre[k] for k in range(len(b_real))]
            # Error in dB: predicted power minus residual power.
            error = 10 * math.log10(np.mean(np.square(predicted_abs))) - 10 * math.log10(np.mean(np.square(diff_real) + np.square(diff_imag)))
            print("db is : %f" % error)
            Error.append(error)
        sio.savemat('result/' + str(j) + 'DB.mat', {'Error': Error})
Exemplo n.º 2
0
class dp_LSTM:
    """sklearn-style wrapper around a stacked-LSTM regressor.

    Parameters
    ------------
    HIDDEN_SIZE : hidden units per LSTM cell.
    NUM_LAYERS : number of stacked LSTM layers.
    BATCH_SIZE : mini-batch size used by fit().
    TRAINING_STEPS : number of training steps.
    learning_rate : learning rate passed to the optimizer.
    optimizer : name of the optimizer for tf.contrib.layers.optimize_loss.

    Attributes
    ------------
    regressor : SKCompat-wrapped Estimator, None until fit() is called.
    """
    def __init__(self, HIDDEN_SIZE=50, NUM_LAYERS=5, BATCH_SIZE=32,
                 TRAINING_STEPS=3000, learning_rate=0.1, optimizer='Adagrad'):
        # Network hyper-parameters.
        self.HIDDEN_SIZE = HIDDEN_SIZE
        self.NUM_LAYERS = NUM_LAYERS
        self.BATCH_SIZE = BATCH_SIZE
        # Training hyper-parameters.
        self.TRAINING_STEPS = TRAINING_STEPS
        self.learning_rate = learning_rate
        self.optimizer = optimizer
        self.regressor = None

    def LstmCell(self):
        # One basic LSTM cell; each layer needs its own instance.
        return tf.contrib.rnn.BasicLSTMCell(self.HIDDEN_SIZE)

    def lstm_model(self, X, y):
        # Build every layer from a fresh cell: reusing a single cell object
        # for all layers would give the LSTM tensors identical names.
        stacked = tf.contrib.rnn.MultiRNNCell(
            [self.LstmCell() for _ in range(self.NUM_LAYERS)])
        # Forward pass through the stacked RNN.
        rnn_out, _ = tf.nn.dynamic_rnn(stacked, X, dtype=tf.float32)
        rnn_out = tf.reshape(rnn_out, [-1, self.HIDDEN_SIZE])
        # Linear regression head: fully connected layer, no activation.
        predictions = tf.contrib.layers.fully_connected(rnn_out, 1, None)
        # Flatten predictions and labels to a common 1-D shape.
        y = tf.reshape(y, [-1])
        predictions = tf.reshape(predictions, [-1])
        # Mean squared error loss.
        loss = tf.losses.mean_squared_error(predictions, y)
        # Optimizer hooked to the global training step.
        train_op = tf.contrib.layers.optimize_loss(
            loss,
            tf.train.get_global_step(),
            optimizer=self.optimizer,
            learning_rate=self.learning_rate)
        return predictions, loss, train_op

    def fit(self, train_X=None, train_y=None):
        # Build the deep recurrent model and train it sklearn-style.
        self.regressor = SKCompat(
            tf.contrib.learn.Estimator(model_fn=self.lstm_model))
        self.regressor.fit(train_X, train_y,
                           batch_size=self.BATCH_SIZE,
                           steps=self.TRAINING_STEPS)

    def predict(self, test_X):
        # Materialise the prediction stream into an array.
        return array([p for p in self.regressor.predict(test_X)])
 def fit(self, train_X=None, train_y=None):
     """Train the LSTM sklearn-style on (train_X, train_y).

     train_X is reshaped to the [batch, 1, features] float32 layout the
     model_fn expects; a fresh Estimator is built on every call.
     """
     # Reshape into the LSTM input layout and cast to float32.
     train_X = train_X.reshape([-1, 1, train_X.shape[1]]).astype(np.float32)
     # Wrap the deep recurrent model in the sklearn-compatible API.
     self.regressor = SKCompat(
         tf.contrib.learn.Estimator(model_fn=self.lstm_model))
     # Run training for the configured number of steps.
     self.regressor.fit(train_X, train_y,
                        batch_size=self.BATCH_SIZE,
                        steps=self.TRAINING_STEPS)
Exemplo n.º 4
0
def train_func(train_X, train_y, model_path="Models/model_sin"):
    """Train the LSTM regressor on (train_X, train_y), checkpointing to model_path.

    Any previous model directory is removed first so training starts from a
    clean slate; the Estimator recreates the directory when it checkpoints.
    """
    # BUG FIX: the original created the directory when missing and then
    # unconditionally rmtree'd it, so a freshly created directory was deleted
    # again immediately. Remove it only when it actually exists.
    if os.path.exists(model_path):
        shutil.rmtree(model_path)
    # Wrap the previously defined lstm_model for sklearn-style training.
    regressor = SKCompat(
        learn.Estimator(model_fn=lstm_model, model_dir=model_path))
    regressor.fit(train_X,
                  train_y,
                  batch_size=BATCH_SIZE,
                  steps=TRAINING_STEPS)
    return None
def train_func(train_X, train_y, model_path="Models/model_sin3"):
    """Train the LSTM regressor, checkpointing into model_path.

    The directory is created if missing, and any regular files directly
    inside it are deleted so training starts from a clean checkpoint dir.
    """
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    # Remove only the regular files at the top level of model_path. The
    # original walked the tree but then only consumed file_list[0] (the top
    # level), and would raise IndexError if the walk yielded nothing; this
    # listdir form is equivalent and robust.
    for entry in os.listdir(model_path):
        full = os.path.join(model_path, entry)
        if os.path.isfile(full):
            os.remove(full)
    # Wrap the previously defined lstm_model for sklearn-style training.
    regressor = SKCompat(learn.Estimator(model_fn=lstm_model, model_dir=model_path))
    regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
    return None
Exemplo n.º 6
0
def train():
    """Train the LSTM on a sampled sine wave, report RMSE, and plot/save predictions."""
    # Generate train/test sets from sin().
    test_start = TRAINING_EXAMPLES * SAMPLE_GAP
    test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP
    train_X, train_y = generate_data(
        np.sin(np.linspace(0, test_start, TRAINING_EXAMPLES,
                           dtype=np.float32)))
    test_X, test_y = generate_data(
        np.sin(
            np.linspace(test_start,
                        test_end,
                        TESTING_EXAMPLES,
                        dtype=np.float32)))

    # Deep recurrent model wrapped for the sklearn-style fit/predict API.
    regressor = SKCompat(
        learn.Estimator(model_fn=lstm_model, model_dir='model/'))

    plt.ion()
    # Train the model.
    regressor.fit(train_X,
                  train_y,
                  batch_size=BATCH_SIZE,
                  steps=TRAINING_STEPS)

    # Predict the test set with the trained model.
    predicted = [[pred] for pred in regressor.predict(test_X)]

    # Evaluate with root mean squared error.
    rmse = np.sqrt(((predicted - test_y)**2).mean(axis=0))
    # BUG FIX: the printed value is the RMSE (sqrt applied above), not the MSE.
    print('Root Mean Square Error is: %f' % (rmse[0]))

    # Plot predicted vs. real curve and save the figure.
    plot_predicted, = plt.plot(predicted, label='predicted')
    plot_test, = plt.plot(test_y, label='real_sin')
    plt.legend([plot_predicted, plot_test], ['predicted', 'real_sin'])
    plt.ylim((-1.2, 1.2))
    plt.draw()
    plt.savefig('cos.png')
Exemplo n.º 7
0
def main(unused_argv):
    """Train, evaluate, and run predictions for the abalone-age Estimator."""
    # Load datasets.
    abalone_train, abalone_test, abalone_predict = maybe_download(
        FLAGS.train_data, FLAGS.test_data, FLAGS.predict_data)

    # Training examples. BUG FIX: np.int (a deprecated alias of builtin int)
    # was removed in NumPy 1.24; the builtin int is the documented,
    # behaviour-identical replacement.
    training_set = tf.contrib.learn.datasets.base.load_csv_without_header(
        filename=abalone_train, target_dtype=int, features_dtype=np.float32)

    # Test examples.
    test_set = tf.contrib.learn.datasets.base.load_csv_without_header(
        filename=abalone_test, target_dtype=int, features_dtype=np.float32)

    # Set of 7 examples for which to predict abalone ages.
    prediction_set = tf.contrib.learn.datasets.base.load_csv_without_header(
        filename=abalone_predict,
        target_dtype=int,
        features_dtype=np.float32)

    # Model hyper-parameters.
    model_params = {"learning_rate": LEARNING_RATE}

    # Instantiate the Estimator and its sklearn-style wrapper.
    est = Estimator(model_fn=model_fn, params=model_params)
    nn = SKCompat(est)

    # Fit.
    nn.fit(x=training_set.data, y=training_set.target, steps=5000)

    # Score accuracy.
    ev = est.evaluate(x=test_set.data, y=test_set.target, steps=1)
    print("Loss: %s" % ev["loss"])
    print("Root Mean Squared Error: %s" % ev["rmse"])

    # Print out predictions.
    print(prediction_set.data.shape)
    predictions = nn.predict(x=prediction_set.data)
    for i, p in enumerate(predictions):
        print("Prediction %s: %s" % (i + 1, p))
Exemplo n.º 8
0
	def _init_estimator(self, k):
		"""Create the k-th SKCompat-wrapped TensorForestEstimator for this group."""
		est_args = self.est_args.copy()
		est_name = '{}/{}'.format(self.name, k)
		# TODO: consider whether to add a random_state; the random_state of
		# each estimator can be set in est_configs by users in the main
		# program, so we need not set it here. More importantly, if some
		# estimators have no random_state parameter, assigning one can break.
		# Derive a reproducible per-estimator seed: fall back to self.seed
		# when none is configured, otherwise offset the base seed by k**2.
		base_seed = est_args.get('base_random_seed', None)
		if base_seed is None:
			est_args['base_random_seed'] = copy.deepcopy(self.seed)
		else:
			est_args['base_random_seed'] = base_seed + k ** 2

		forest_params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(**est_args)
		return SKCompat(tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator(forest_params))
Exemplo n.º 9
0
def main():
    """Load prices from Excel, train the LSTM, evaluate it, and forecast 30 days ahead."""
    ##########################################################################
    # Data preprocessing.
    # BUG FIX: pandas renamed read_excel's 'sheetname' keyword to 'sheet_name'
    # in 0.21 and removed the old spelling in 1.0.
    data = pd.read_excel(data_str, header=0, sheet_name=sheet_str)
    data.head()
    # Extract timestamps and closing prices.
    time = data.iloc[:, 0].tolist()
    data = data.iloc[:, 4].tolist()
    # Inspect the raw series.
    plt.figure(figsize=(12, 8))
    plt.rcParams['font.sans-serif'] = 'SimHei'  # font able to render Chinese text
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
    plt.title('原始数据')
    plt.plot(time, data)
    plt.show()
    #####################################################################
    # tf.contrib.learn provides the sklearn-like fit/predict interface.
    learn = tf.contrib.learn
    # Persist the model under Models/model_1 to allow incremental training.
    regressor = SKCompat(
        learn.Estimator(model_fn=lstm_model, model_dir='Models/model_1'))
    # Rescale the raw series.
    data = data_processing(data)
    # First 4000 points form the training sample ...
    train_x, train_y = generate_data(data[0:4000])
    # ... the remainder is the test sample.
    test_x, test_y = generate_data(data[3999:-1])
    #################################################################################
    # Train.
    regressor.fit(train_x,
                  train_y,
                  batch_size=BATCH_SIZE,
                  steps=TRAINING_STEPS)
    #################################################################################
    # Predict every test example with the trained model.
    predicted = np.array([pred for pred in regressor.predict(test_x)])
    # Plot real vs. predicted values before de-normalisation.
    plt.figure(figsize=(12, 8))
    plt.plot(predicted, label='预测值')
    plt.plot(test_y, label='真实值')
    plt.title('反标准化之前')
    plt.legend()
    plt.show()
    ###################################################################################
    # Undo the scaling and compare again.
    scale_predicted = scale_inv(predicted)
    scale_test_y = scale_inv(test_y)
    plt.figure(figsize=(12, 8))
    plt.plot(scale_predicted, label='预测值')
    plt.plot(scale_test_y, label='真实值')
    plt.title('反标准化之后')
    plt.legend()
    plt.show()
    ######################################################################################
    # Side-by-side comparison figure.
    fig = plt.figure(figsize=(12, 8))
    fig.suptitle("对比图")
    ax1 = fig.add_subplot(1, 2, 1)
    ax1.plot(time[4012:-1], scale_predicted, label="测试集")
    ax1.plot(time[0:4000], scale_inv(data[0:4000]), label="训练集")
    plt.legend()
    plt.title('训练集数据+测试集数据')
    ax2 = fig.add_subplot(1, 2, 2)
    ax2.plot(time, scale_inv(data))
    plt.title('反标准化后数据')
    ########################################################################################
    # Accuracy of the de-normalised predictions.
    pre_rate = precision_rate(scale_predicted, scale_test_y)
    print('准确率为:', pre_rate)
    #########################################################################################
    # Forecast the next 30 days by rolling the model forward, feeding each
    # prediction back into the input window.
    day = 30
    length = len(data)
    for i in range(day):
        P = list()
        P.append([data[length - TIMESTEPS - 1 + i:length - 1 + i]])
        P = np.array(P, dtype=np.float32)
        pre = regressor.predict(P)
        data = np.append(data, pre)
    pre = data[len(data) - day:len(data) + 1]
    print("====================================")
    print("以下为进行30天的预测数据")
    print("反标准化之前:\n", pre)
    print("反标准化之后:\n", scale_inv(pre))
    # Plot the 30-day forecast.
    fig = plt.figure(figsize=(12, 8))
    plt.plot(scale_inv(pre))
    plt.title("未来30天汇率变化预测图")
    plt.show()
Exemplo n.º 10
0
    return test_y, predicted_total


data1 = data.Data()  # fetch stock prices via the data module
# data2 = data1
data2 = data.Return(data1)  # convert prices to a return time series (a list)

TRAINING_EXAMPLES = int(len(data2) * 0.8)  # number of training samples

TESTING_EXAMPLES = len(data2) - TRAINING_EXAMPLES  # number of test samples
SAMPLE_GAP = 0.01
predicted_total = []
for i in range(TESTING_EXAMPLES - TIMESTEPS - 1):
    predicted_total.append(0)
seq = np.array(data2[0:TRAINING_EXAMPLES])  # first 80% of the series as the training array
regressor = SKCompat(
    learn.Estimator(model_fn=lstm_model, model_dir="Models/model_2"))
###########################

#####################

# NOTE(review): RNN() presumably uses the globals above and returns the test
# targets plus predictions -- confirm against its definition elsewhere.
test_y, predicted_total = RNN()

# Sign of each predicted/actual return, used for accuracy bookkeeping below.
a = []
b = []
x = 0
for i in test_y:
    if i[0] >= 0:
        a.append(1)
    else:
        a.append(-1)
print "--------------------------------------------"
# create a lstm instance and validation monitor

#validation_monitor = learn.monitors.ValidationMonitor(test_X, test_y,
#                                                     every_n_steps=PRINT_STEPS,
#                                                     early_stopping_rounds=1000)
PRINT_STEPS = 100
# NOTE(review): test_X is not defined in this snippet; it must come from
# elsewhere in the original file.
validation_monitor = learn.monitors.ValidationMonitor(
    test_X, test_y, every_n_steps=PRINT_STEPS, early_stopping_rounds=1000)
#http://lib.csdn.net/article/aiframework/61081
# Training: wrap the previously defined lstm_model. This rebinds `regressor`,
# replacing the one created above.
regressor = SKCompat(
    learn.Estimator(model_fn=lstm_model,
                    model_dir="Models/model_0",
                    config=tf.contrib.learn.RunConfig(
                        save_checkpoints_steps=50,
                        save_checkpoints_secs=None,
                        save_summary_steps=100,
                    )))

#regressor.fit(train_X, train_y, batch_size=1, steps=1000,
#              monitors=[validation_monitor])
#nput_fn = tf.contrib.learn.io.numpy_input_fn({"x":train_X}, train_y, batch_size=50,
#                                              num_epochs=1000)
regressor.fit(train_X, train_y, batch_size=50, steps=10000)

#regressor.fit(train_X, train_y,batch_size=50,steps=10000, monitors=[validation_monitor])
# Compute predictions (Python 2 print statement syntax).
print "----------fit over,to predict------------"
predicted = [[pred] for pred in regressor.predict(test_X)]
#print predicted
Exemplo n.º 12
0
    func_lstm_cell = lambda : tf.contrib.rnn.LSTMCell(HIDDEN_SIZE)
    cell = tf.contrib.rnn.MultiRNNCell([func_lstm_cell() for _ in range(NUM_LAYERS)])
    output, _ = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
    print(output.get_shape())
    output = tf.reshape(output, [-1, HIDDEN_SIZE])
    pred = tf.contrib.layers.fully_connected(output, 1, None)
    labels = tf.reshape(y, [-1])
    pred = tf.reshape(pred, [-1])

    loss = tf.losses.mean_squared_error(pred, labels)
    train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.get_global_step(),
                                               optimizer='Adagrad', learning_rate=0.1)
    return pred, loss, train_op


# Wrap the lstm_model defined above for sklearn-style training.
regressor = SKCompat(learn.Estimator(model_fn=lstm_model, model_dir='temp/c8/mdl2'))

# Sample-domain boundaries for the sine-wave train/test split.
test_start = TRAINING_EXAMPLES * SAMPLE_GAP
test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP

train_X, train_y = generate_data(np.sin(np.linspace(
    0, test_start, TRAINING_EXAMPLES, dtype=np.float32
)))

test_X, test_y = generate_data(np.sin(np.linspace(
    test_start, test_end, TESTING_EXAMPLES, dtype=np.float32
)))

# Train, then predict the test window.
regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)

pred_y = regressor.predict(test_X)
Exemplo n.º 13
0
    #将预测的目标转化成one-hot编码的形式,因为一共有三个类别,所以向量长度为3
    #经过转换后,类别分别表示为(1,0,0),(0,1,0),(0,0,1)
    target = tf.one_hot(target, 3, 1, 0)

    # 计算预测值及损失函数。
    #使用了一个全连接层,参数:输入,输出,激活函数
    logits = tf.contrib.layers.fully_connected(features, 3, tf.nn.softmax)
    loss = tf.losses.softmax_cross_entropy(target, logits)

    # 创建优化步骤。
    train_op = tf.contrib.layers.optimize_loss(
        loss,  #损失函数
        tf.contrib.framework.get_global_step(),  #获取训练步数并再训练时更新
        optimizer='Adam',  #定义优化器
        learning_rate=0.01)  #定义学习率
    return tf.arg_max(logits, 1), loss, train_op


# Load iris and hold out 20% for testing (fixed seed for reproducibility).
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=0)
# Wrap and train the model, then report its accuracy.
x_train, x_test = map(np.float32, [x_train, x_test])
classifier = SKCompat(
    learn.Estimator(model_fn=my_model, model_dir="Model/model_1"))
classifier.fit(x_train, y_train, steps=800)

y_predicted = [i for i in classifier.predict(x_test)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: %.2f%%' % (score * 100))
Exemplo n.º 14
0
    # 输出预测和损失值
    loss = tf.losses.mean_squared_error(predictions, labels)

    # 损失函数优化方法
    train_op = tf.contrib.layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer="Adagrad",
        learning_rate=0.1)
    return predictions, loss, train_op


# Build the deep model.
# Estimator has a companion SKCompat class: wrap the Estimator with it when
# passing data through x/y arguments instead of an input_fn.
# The second argument is the local checkpoint directory.
regressor = SKCompat(
    learn.Estimator(model_fn=lstm_model, model_dir="Models/model_2"))

# Sample-domain boundaries for the sine-wave train/test split.
test_start = TRAINING_EXAMPLES * SAMPLE_GAP
test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP
train_X, train_y = generate_data(
    np.sin(np.linspace(0, test_start, TRAINING_EXAMPLES, dtype=np.float32)))
test_X, test_y = generate_data(
    np.sin(
        np.linspace(test_start, test_end, TESTING_EXAMPLES, dtype=np.float32)))
# Train, then predict the test window.
regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
predicted = [[pred] for pred in regressor.predict(test_X)]

# Evaluation metric: this is the RMSE (sqrt of the MSE).
rmse = np.sqrt(((predicted - test_y)**2).mean(axis=0))

fig = plt.figure()
Exemplo n.º 15
0
    # 将predictions和labels调整统一的shape
    labels = tf.reshape(y, [-1])
    predictions = tf.reshape(predictions, [-1])

    loss = tf.losses.mean_squared_error(predictions, labels)

    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(),
        optimizer="Adagrad", learning_rate=0.1)

    return predictions, loss, train_op

# 4. Training.
# Wrap the lstm_model defined above for sklearn-style training.
regressor = SKCompat(learn.Estimator(model_fn=lstm_model,model_dir="Models/model_2"))

# Generate sine-wave train/test data.
test_start = TRAINING_EXAMPLES * SAMPLE_GAP
test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP
train_X, train_y = generate_data(np.sin(np.linspace(
    0, test_start, TRAINING_EXAMPLES, dtype=np.float32)))
test_X, test_y = generate_data(np.sin(np.linspace(
    test_start, test_end, TESTING_EXAMPLES, dtype=np.float32)))

# Fit the model.
regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)

# Compute predictions on the test window.
predicted = [[pred] for pred in regressor.predict(test_X)]
Exemplo n.º 16
0

#print tf.reshape(train_y, [-1])
start = datetime.datetime.now()
print "--------------------------------------------"
print start
PRINT_STEPS = 100
# Early-stopping monitor evaluated on the test split every PRINT_STEPS steps.
# NOTE(review): it is never passed to fit() below, so it has no effect.
validation_monitor = learn.monitors.ValidationMonitor(test_X, test_y,
                                                     every_n_steps=PRINT_STEPS,
                                                     early_stopping_rounds=1000)
#http://lib.csdn.net/article/aiframework/61081
# Training: wrap the previously defined lstm_model; checkpoint/summary cadence
# is controlled through RunConfig.
regressor = SKCompat(learn.Estimator(model_fn=lstm_model, model_dir="Models/model_0",
                                     config=tf.contrib.learn.RunConfig(
                                     save_checkpoints_steps=100,
                                     save_checkpoints_secs=None,
                                     save_summary_steps=100,
                                     )))

#regressor.fit(train_X, train_y, batch_size=1, steps=1000,
#              monitors=[validation_monitor])
#nput_fn = tf.contrib.learn.io.numpy_input_fn({"x":train_X}, train_y, batch_size=50,
#                                              num_epochs=1000)
print "total train step: ",NUM_TRAIN_BATCHES * NUM_EPOCHS
regressor.fit(train_X, train_y,batch_size=BATCH_SIZE,steps= NUM_TRAIN_BATCHES * NUM_EPOCHS )  # steps=train_labels.shape[0]/batch_size * epochs,

#http://blog.mdda.net/ai/2017/02/25/estimator-input-fn -- differences between the old and new input APIs
#regressor.fit(train_X, train_y,batch_size=50,steps=10000, monitors=[validation_monitor])
# Compute predictions (Python 2 print statement syntax).
print "----------fit over,to predict------------"
predicted = [[pred] for pred in regressor.predict(test_X)]
Exemplo n.º 17
0
    loss = tf.losses.mean_squared_error(predictions, labels)
    '''定义优化器各参数'''
    train_op = tf.contrib.layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adagrad',
        learning_rate=0.6)
    '''返回预测值、损失函数及优化器'''
    return predictions, loss, train_op


# tf.contrib.learn provides the sklearn-like fit/predict interface.
'''载入tf中仿sklearn训练方式的模块'''
learn = tf.contrib.learn
# Initialise the LSTM model; checkpoints are stored in model_dir so the model
# can be trained incrementally across runs.
'''初始化我们的LSTM模型,并保存到工作目录下以方便进行增量学习'''
regressor = SKCompat(
    learn.Estimator(model_fn=lstm_model,
                    model_dir='D:\\学习资料\\代码\\PCAP_analysis\\models\\model_2'))
# Rescale the raw series.
'''对原数据进行尺度缩放'''
data = data_processing(data)
# Use every sample for training ...
'''将所有样本来作为训练样本'''
train_X, train_y = generate_data(data[:])
# ... and the same samples for testing (in-sample evaluation).
'''将所有样本作为测试样本'''
test_X, test_y = generate_data(data[:])
# Train sklearn-style with the configured batch size and step count.
'''以仿sklearn的形式训练模型,这里指定了训练批尺寸和训练轮数'''
regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
# Generate predictions for the whole (test) series.
'''利用已训练好的LSTM模型,来生成对应测试集的所有预测值'''
predicted = np.array([pred for pred in regressor.predict(test_X)])
# Plot real vs. predicted values before de-normalisation.
'''绘制反标准化之前的真实值与预测值对比图'''
plt.figure()
plt.plot(predicted, label=u'预测值')
plt.plot(test_y, label=u'真实值')
Exemplo n.º 18
0
	def fit(self,train_X=None,train_y=None):
		"""Build a fresh SKCompat-wrapped Estimator and train it on (train_X, train_y)."""
		# Build the deep recurrent model.
		self.regressor = SKCompat(tf.contrib.learn.Estimator(model_fn=self.lstm_model))
		# Train sklearn-style with the configured batch size and step count.
		self.regressor.fit(train_X, train_y, batch_size=self.BATCH_SIZE, steps=self.TRAINING_STEPS)
Exemplo n.º 19
0
def test_func(test_X, test_y, model_path="Models/model_sin"):
    """Evaluate the saved model on (test_X, test_y) and return its predictions.

    The checkpoint under model_path is loaded into a fresh Estimator; the
    score() evaluation runs for its side effects only.
    """
    regressor = SKCompat(
        learn.Estimator(model_fn=lstm_model, model_dir=model_path))
    a = regressor.score(x=test_X, y=test_y)  # evaluation runs here; result unused
    return [[p] for p in regressor.predict(test_X)]
    predictions = tf.contrib.layers.fully_connected(output, 1, None)

    # 将predictions和labels调整统一的shape
    labels = tf.reshape(y, [-1])
    predictions = tf.reshape(predictions, [-1])
    print("predictions.shape:",predictions.shape)
    print("labels.shape:",labels.shape)
    loss = tf.losses.mean_squared_error(predictions, labels)
    train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.get_global_step(),
                                             optimizer="Adagrad",
                                             learning_rate=0.1)
    return predictions, loss, train_op
# In[]
# Training.
# Wrap the lstm_model defined above in the sklearn-style API.
regressor = SKCompat(learn.Estimator(model_fn=lstm_model, model_dir=MODEL_PATH))
#regressor = learn.Estimator(model_fn=lstm_model, model_dir=MODEL_PATH)
# Generate train/test splits from the normalised series.
train_X, train_y = generate_data(normalize_data[0:train_length])
test_X, test_y = generate_data(normalize_data[train_length:data_length])
# Swap the last two axes of every array.
# NOTE(review): exact axis semantics depend on generate_data -- confirm.
train_X=np.transpose(train_X,[0,2,1])
train_y=np.transpose(train_y,[0,2,1])
test_X=np.transpose(test_X,[0,2,1])
test_y=np.transpose(test_y,[0,2,1])
# Fit the model.
# In[]
regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
# Evaluate on the test split.
# In[]
#predicted = [[pred] for pred in regressor.predict(test_X)]
regressor.score(test_X,test_y)
Exemplo n.º 21
0
        train_op = tf.contrib.layers.optimize_loss(loss,
                                                   tf.contrib.framework.get_global_step(),
                                                   optimizer='Adagrad',
                                                   learning_rate=0.6)
        '''返回预测值、损失函数及优化器'''
        return predictions, loss, train_op



# +
a = MyLstm()
# tf.contrib.learn provides the sklearn-like fit/predict interface.
'''载入tf中仿sklearn训练方式的模块'''
learn = tf.contrib.learn

# Initialise the LSTM model; checkpoints go to Model/model_2 so the model can
# be trained incrementally.
'''初始化我们的LSTM模型,并保存到工作目录下以方便进行增量学习'''
regressor = SKCompat(learn.Estimator(model_fn=a.lstm_model, model_dir='Model/model_2'))

# +
# Rescale the raw series.
'''对原数据进行尺度缩放'''
data = data_processing(data)

# Use every sample for training ...
'''将所有样本来作为训练样本'''
train_X, train_y = generate_data(data)

# ... and the same samples for testing (in-sample evaluation).
'''将所有样本作为测试样本'''
test_X, test_y = generate_data(data)

# Train sklearn-style with the configured batch size and step count.
'''以仿sklearn的形式训练模型,这里指定了训练批尺寸和训练轮数'''
regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)

# +
Exemplo n.º 22
0
import numpy as np
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat

learn = tf.contrib.learn


def my_model(features, target):
    """3-class softmax classifier model_fn: returns (predicted class, loss, train op)."""
    # One-hot encode the integer labels into 3 classes.
    onehot = tf.one_hot(target, 3, 1, 0)
    # Single fully connected layer with softmax outputs.
    logits = tf.contrib.layers.fully_connected(features, 3, tf.nn.softmax)
    loss = tf.losses.softmax_cross_entropy(onehot, logits)
    # Adam optimizer driven by the global step.
    train_op = tf.contrib.layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adam',
        learning_rate=0.01)
    return tf.argmax(logits, 1), loss, train_op


# Load iris and hold out 20% for testing (fixed seed for reproducibility).
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=0)

x_train, x_test = map(np.float32, [x_train, x_test])

# Wrap my_model in the sklearn-style API, train, and report accuracy.
classifier = SKCompat(
    learn.Estimator(model_fn=my_model, model_dir='temp/c8/mdl'))
classifier.fit(x_train, y_train, steps=800)
y_pred = classifier.predict(x_test)
acc = metrics.accuracy_score(y_test, y_pred)
print(f'Accuracy: {acc}')
    predictions = tf.reshape(predictions, [-1])
    print("predictions.shape:", predictions.shape)
    print("labels.shape:", labels.shape)
    loss = tf.losses.mean_squared_error(predictions, labels)
    train_op = tf.contrib.layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer="Adagrad",
        learning_rate=0.1)
    return predictions, loss, train_op


# In[]
# Training.
# Wrap the lstm_model defined above in the sklearn-style API.
regressor = SKCompat(learn.Estimator(model_fn=lstm_model,
                                     model_dir=MODEL_PATH))
#regressor = learn.Estimator(model_fn=lstm_model, model_dir=MODEL_PATH)
# Generate train/test splits from the normalised series.
train_X, train_y = generate_data(normalize_data[0:train_length])
test_X, test_y = generate_data(normalize_data[train_length:data_length])
# Swap the last two axes of every array.
# NOTE(review): exact axis semantics depend on generate_data -- confirm.
train_X = np.transpose(train_X, [0, 2, 1])
train_y = np.transpose(train_y, [0, 2, 1])
test_X = np.transpose(test_X, [0, 2, 1])
test_y = np.transpose(test_y, [0, 2, 1])
# Fit the model (disabled in this snippet; only scoring runs).
# In[]
#regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
# Evaluate on the test split.
# In[]
#predicted = [[pred] for pred in regressor.predict(test_X)]
regressor.score(test_X, test_y)
Exemplo n.º 24
0
def train(InputDF, TargetDF):
    """Train an attention-wrapped GRU regressor on the given frames (Python 2).

    InputDF  -- pandas DataFrame of features (rows are time steps).
    TargetDF -- pandas DataFrame of per-stock targets aligned with InputDF.
    Returns the model's predictions for the last BATCH_SIZE rows of InputDF.
    """
    print "*" * 50, "Training a rnn network", "*" * 50
    num_features = len(InputDF.columns)
    num_stocks = len(TargetDF.columns)
    print "num stocks %s,last train data %s,first train data %s" % (num_stocks, TargetDF.index[-1], TargetDF.index[0])

    # Build the arrays; the training size must be a multiple of BATCH_SIZE.
    used_size = (len(InputDF) // BATCH_SIZE) * BATCH_SIZE
    train_X, train_y = InputDF[-used_size:].values, TargetDF[-used_size:].values
    test_X, test_y = InputDF[-BATCH_SIZE:].values, TargetDF[-BATCH_SIZE:].values  # TODO
    train_X = train_X.astype(np.float32)
    train_y = train_y.astype(np.float32)
    test_X = test_X.astype(np.float32)
    test_y = test_y.astype(np.float32)
    print np.shape(train_X), np.shape(train_y)
    print "Train Set <X:y> shape"
    print "Train Data Count:%s , Feather Count:%s , Stock Count:%s" % (
        len(train_X), num_features, num_stocks)  # ~3300 stock-days? there aren't that many stocks -- 500

    NUM_TRAIN_BATCHES = int(len(train_X) / BATCH_SIZE)
    ATTN_LENGTH = 10
    dropout_keep_prob = 0.5

    # NOTE(review): LstmCell is defined but never used; makeGRUCells below is
    # what the model actually builds its layers from.
    def LstmCell():
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(RNN_HIDDEN_SIZE, state_is_tuple=True)
        return lstm_cell

    def makeGRUCells():
        # Stack of GRU cells: only the first layer gets the attention wrapper,
        # and every layer gets output dropout.
        cells = []
        for i in range(NUM_LAYERS):
            cell = tf.nn.rnn_cell.GRUCell(num_units=RNN_HIDDEN_SIZE)
            if len(cells) == 0:
                # Add attention wrapper to first layer.
                cell = tf.contrib.rnn.AttentionCellWrapper(
                    cell, attn_length=ATTN_LENGTH, state_is_tuple=True)
            cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=dropout_keep_prob)
            cells.append(cell)
        attn_cell = tf.nn.rnn_cell.MultiRNNCell(cells,
                                                state_is_tuple=True)  # original note: GRUCell vs BasicLSTMCell differ in state_is_tuple requirements
        return attn_cell

    def lstm_model(X, y):
        # model_fn: static RNN over the batch treated as the time axis,
        # followed by a linear regression head over num_stocks outputs.
        cell = makeGRUCells()
        ''' #理论dynnamic rnn 首选,但计算速度相比静态慢很多,不知何故
       output, _ = tf.nn.dynamic_rnn(
                                      cell,
                                      inputs=tf.expand_dims(X, -1),
                                      dtype=tf.float32,
                                      time_major=False
                                      )
        '''
        split_inputs = tf.reshape(X, shape=[1, BATCH_SIZE,
                                            num_features])  # Each item in the batch is a time step, iterate through them
        # print split_inputs
        split_inputs = tf.unstack(split_inputs, axis=1, name="unpack_l1")
        output, _ = tf.nn.static_rnn(cell,
                                     inputs=split_inputs,
                                     dtype=tf.float32
                                     )

        output = tf.transpose(output, [1, 0, 2])
        output = output[-1]
        # Linear regression head: fully connected layer with no activation.
        predictions = tf.contrib.layers.fully_connected(output, num_stocks, None)
        labels = y
        loss = tf.losses.mean_squared_error(predictions, labels)
        # print "lost:",loss
        train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.get_global_step(),
                                                   optimizer="Adagrad",
                                                   learning_rate=lr)
        return predictions, loss, train_op

    PRINT_STEPS = 100
    # NOTE(review): this monitor is created but never passed to fit(), so
    # early stopping is effectively disabled here.
    validation_monitor = learn.monitors.ValidationMonitor(test_X, test_y,
                                                          every_n_steps=PRINT_STEPS,
                                                          early_stopping_rounds=1000)
    # Training.
    regressor = SKCompat(learn.Estimator(model_fn=lstm_model,
                                         # model_dir="Models/model_0",
                                         config=tf.contrib.learn.RunConfig(
                                             save_checkpoints_steps=100,
                                             save_checkpoints_secs=None,
                                             save_summary_steps=100,
                                         )))

    print "Total Train Step: ", NUM_TRAIN_BATCHES * NUM_EPOCHS
    print "*" * 50, "Training a rnn regress task now", "*" * 50
    regressor.fit(train_X, train_y, batch_size=BATCH_SIZE,
                  steps=NUM_TRAIN_BATCHES * NUM_EPOCHS)  # steps=train_labels.shape[0]/batch_size * epochs,

    print "*" * 50, "Predict tomorrow stock price now", "*" * 50
    pred = regressor.predict(test_X[-BATCH_SIZE:])  # predict from the last BATCH_SIZE rows (original note: last 21 days -> next 5 days)

    return pred