Example no. 1
def preprocess_data(filename='/content/drive/My Drive/fer2013.csv',
                    image_size=(48, 48)):
    X, Y = getData(filename)
    num_class = len(set(Y))

    # balance = balance_class(Y)

    N, D = X.shape
    X = X.reshape(N, *image_size, 1)  # unpack the (height, width) tuple -> (N, 48, 48, 1)

    return give_train_test_splits(X, Y, test_size=0.1,
                                  random_state=0), num_class
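The helper functions this snippet calls (getData and give_train_test_splits) are not shown. Below is a minimal sketch of what they presumably do, assuming the standard fer2013.csv layout (columns emotion, pixels, Usage, where pixels is a space-separated string of 48*48 grayscale values) and sklearn's train_test_split; this is a reconstruction for illustration, not the repository's actual code.

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

def getData(filename):
    # hypothetical reader for fer2013.csv: one row per 48x48 grayscale image
    df = pd.read_csv(filename)
    Y = df['emotion'].values
    X = np.array([np.array(p.split(), dtype=np.float32) for p in df['pixels']])
    return X / 255.0, Y  # scale pixel values to [0, 1]

def give_train_test_splits(X, Y, test_size=0.1, random_state=0):
    # thin wrapper around sklearn's splitter; returns (X_train, X_test, Y_train, Y_test)
    return train_test_split(X, Y, test_size=test_size, random_state=random_state)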
Example no. 2
def train():

    model = lstm(input_size=args.input_size,
                 hidden_size=args.hidden_size,
                 num_layers=args.layers,
                 output_size=1,
                 dropout=args.dropout,
                 batch_first=args.batch_first)
    model.to(args.device)

    # Resume training from a saved checkpoint if needed
    if args.useGPU:
        checkpoint = torch.load(args.save_file)
    else:
        checkpoint = torch.load(args.save_file,
                                map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'])

    criterion = nn.MSELoss()  # define the loss function
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)  # Adam gradient descent, learning rate = 0.001
    optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr)

    close_max, close_min, train_loader, test_loader = getData(
        args.corpusFile, args.sequence_length, args.batch_size)
    for i in range(args.epochs):
        total_loss = 0
        for idx, (data, label) in enumerate(train_loader):
            if args.useGPU:
                data1 = data.squeeze(1).cuda()
                pred = model(Variable(data1).cuda())
                # print(pred.shape)
                pred = pred[1, :, :]
                label = label.unsqueeze(1).cuda()
                # print(label.shape)
            else:
                data1 = data.squeeze(1)
                pred = model(Variable(data1))
                pred = pred[1, :, :]
                label = label.unsqueeze(1)
            loss = criterion(pred, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        if i % args.saveepochs == 0:
            # torch.save(model, args.save_file)
            torch.save({'state_dict': model.state_dict()}, args.save_file)
            print('Epoch %d: model saved' % i)
            print(total_loss)
    # torch.save(model, args.save_file)
    torch.save({'state_dict': model.state_dict()}, args.save_file)
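The lstm class itself is not part of this snippet. The sketch below is one definition consistent with how the model is called here: the constructor keywords match, and indexing the output as pred[1, :, :] suggests forward returns the final hidden state of every LSTM layer, projected to output_size. The default values are placeholders, not the repository's actual settings.

import torch.nn as nn

class lstm(nn.Module):
    # hypothetical reconstruction of the model used in these examples
    def __init__(self, input_size=8, hidden_size=32, num_layers=2,
                 output_size=1, dropout=0.0, batch_first=True):
        super().__init__()
        self.rnn = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                           num_layers=num_layers, dropout=dropout,
                           batch_first=batch_first)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # x: (batch, seq_len, input_size) when batch_first=True
        out, (h_n, c_n) = self.rnn(x)
        # project each layer's final hidden state: (num_layers, batch, output_size)
        return self.linear(h_n)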
Example no. 3
def eval():
    # model = torch.load(args.save_file)
    model = lstm(input_size=args.input_size,
                 hidden_size=args.hidden_size,
                 num_layers=args.layers,
                 output_size=1)
    model.to(args.device)
    if args.useGPU:
        checkpoint = torch.load(args.save_file)
    else:
        checkpoint = torch.load(args.save_file,
                                map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'])
    preds = []
    labels = []
    close_max, close_min, train_loader, test_loader = getData(
        args.corpusFile, args.sequence_length, args.batch_size)
    # for idx, (x, label) in enumerate(test_loader):
    for idx, (x, label) in enumerate(train_loader):
        if args.useGPU:
            x = x.squeeze(1).cuda()  # batch_size,seq_len,input_size
        else:
            x = x.squeeze(1)
        pred = model(x)
        pred_list = pred.data.squeeze(1).tolist()  # renamed to avoid shadowing the built-in list
        preds.extend(pred_list[-1])
        labels.extend(label.tolist())
    # print(preds)
    count = 0
    acc = 0
    for i in range(len(preds) - 1):
        # print('Predicted value: %.2f, actual value: %.2f' % (
        #     preds[i][0] * (close_max - close_min) + close_min,
        #     labels[i][0] * (close_max - close_min) + close_min))
        # print("%.2f" % preds[i][0], end=", ")
        if preds[i][0] > 0:
            print("Predicted: up", end=", ")
        else:
            print("Predicted: down", end=", ")
        if labels[i] == 1.0:
            print("Actual: up")
        else:
            print("Actual: down")
        if ((preds[i][0] >= 0 and labels[i] == 1.0)
                or (preds[i][0] <= 0 and labels[i] == -1.0)):
            acc += 1
        count += 1
    print(acc, count)
    print("实际预测准确率{0}%".format(acc * 1.0 / count * 100))
Example no. 4
def eval():
    # model = torch.load(args.save_file)
    model = lstm(input_size=args.input_size,
                 hidden_size=args.hidden_size,
                 num_layers=args.layers,
                 output_size=1)
    model.to(args.device)
    checkpoint = torch.load(args.save_file)
    model.load_state_dict(checkpoint['state_dict'])
    preds = []
    labels = []
    close_max, close_min, train_loader, test_loader = getData(args.corpusFile, args.sequence_length, args.batch_size)
    for idx, (x, label) in enumerate(test_loader):
        if args.useGPU:
            x = x.squeeze(1).cuda()  # batch_size,seq_len,input_size
        else:
            x = x.squeeze(1)
        pred = model(x)
        pred_list = pred.data.squeeze(1).tolist()
        preds.extend(pred_list[-1])
        labels.extend(label.tolist())

    for i in range(len(preds)):
        print('Predicted value: %.2f, actual value: %.2f' % (
            preds[i][0] * (close_max - close_min) + close_min,
            labels[i] * (close_max - close_min) + close_min))
Example no. 5
def eval():
    # model = torch.load(args.save_file)
    model = lstm(input_size=args.input_size,
                 hidden_size=args.hidden_size,
                 num_layers=args.layers,
                 output_size=1)
    model.to(args.device)
    checkpoint = torch.load(args.save_file)
    model.load_state_dict(checkpoint['state_dict'])

    criterion = nn.MSELoss()  # define the loss function
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)  # Adam gradient descent, learning rate = 0.001

    preds = []
    labels = []
    close_max, close_min, train_loader, test_loader = getData(args.corpusFile, args.sequence_length, args.batch_size)
    for idx, (x, label) in enumerate(test_loader):
        if args.useGPU:
            x = x.squeeze(1).cuda()  # batch_size,seq_len,input_size
        else:
            x = x.squeeze(1)
        pred = model(Variable(x))

        # note: this variant keeps optimizing on the test batches, so the model is updated during evaluation
        loss = criterion(pred, label.to(pred.device))  # move the label onto the same device as pred
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        pred_list = pred.data.squeeze(1).tolist()
        preds.extend(pred_list[-1])
        labels.extend(label.tolist())

    f = open('pred.csv', 'w', encoding='utf-8', newline='')
    csv_writer = csv.writer(f)

    for i in range(len(preds)):
        print('Predicted value: %.2f, actual value: %.2f' % (
            preds[i][0] * (close_max - close_min) + close_min,
            labels[i] * (close_max - close_min) + close_min))
        csv_writer.writerow([preds[i][0], labels[i]])


    f.close()
Example no. 6
from sklearn.model_selection import GridSearchCV
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.svm import SVR
from dataset import getData, plot, RMSPE, RMSE
import numpy as np
import time

start = time.perf_counter()  # time.clock() was removed in Python 3.8
warnings.filterwarnings('ignore')
X_train, Y_train, X_test, Y_test = getData()

#X_train.shape (882, 6)
#Y_train.shape (882,)

print(X_train)
print(Y_train)
'''
svr = GridSearchCV(SVR(), param_grid={"kernel": ("linear", 'rbf'), "C": np.logspace(-3, 3, 7), "gamma": np.logspace(-3, 3, 7)})

svr.fit(X_train, Y_train)



y_predict = svr.predict(X_test)

open = X_test['Open'].values
for i in range(len(open)):
    if open[i] == 0:
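The SVR grid search above is commented out and truncated. Here is a self-contained sketch of the same search, reusing only the estimator, parameter grid, and logspace ranges already shown; fit_svr is a hypothetical wrapper name, not part of the original code.

import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR

def fit_svr(X_train, Y_train, X_test):
    # grid-search kernel, C, and gamma over the same ranges as the commented-out code
    svr = GridSearchCV(SVR(),
                       param_grid={"kernel": ("linear", "rbf"),
                                   "C": np.logspace(-3, 3, 7),
                                   "gamma": np.logspace(-3, 3, 7)})
    svr.fit(X_train, Y_train)
    return svr.predict(X_test)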
Example no. 7
        #rmse = np.sqrt(mean_squared_error(test_predict,test_y))
        #mae = mean_absolute_error(y_pred=test_predict, y_true=test_y)
        #print('mae:', mae, '   rmse:', rmse)
    return test_predict, xx, yy


test_predict, xx, yy = train_lstm(batch_size=80, time_step=15, train_begin=0, train_end=882)


predict = []
for i in range(4):
    for j in test_predict[i]:
        predict.append(j)


X_train1, Y_train1, X_test1, Y_test1 = getData()
open_prices = X_test1['Open'].values  # renamed to avoid shadowing the built-in open()
for i in range(len(open_prices)):
    if open_prices[i] == 0:
        predict[i] = 0  # force the prediction to 0 on days where the 'Open' value is 0

print(predict)
predict = pd.DataFrame({'Date': pd.date_range(start='2015-06-02', end='2015-07-31'),
                        'Sales': predict})
predict = predict.set_index('Date')


rmspe = RMSPE(Y_test1.values, predict.values)
print(rmspe)

rmse = RMSE(Y_test1.values, predict.values)
print(rmse)
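RMSPE and RMSE are imported from dataset but not defined in any of these snippets. The sketch below shows the usual formulas, with the percentage error skipping entries whose true value is zero (a common convention for sales series that contain closed-store days); the actual implementations in dataset.py may differ.

import numpy as np

def RMSE(y_true, y_pred):
    # root mean squared error
    y_true, y_pred = np.ravel(y_true), np.ravel(y_pred)
    return np.sqrt(np.mean((y_true - y_pred) ** 2))

def RMSPE(y_true, y_pred):
    # root mean squared percentage error, ignoring entries whose true value is 0
    y_true = np.ravel(y_true).astype(float)
    y_pred = np.ravel(y_pred).astype(float)
    mask = y_true != 0
    return np.sqrt(np.mean(((y_true[mask] - y_pred[mask]) / y_true[mask]) ** 2))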