Example #1
def train():

    model = lstm(input_size=args.input_size,
                 hidden_size=args.hidden_size,
                 num_layers=args.layers,
                 output_size=1,
                 dropout=args.dropout,
                 batch_first=args.batch_first)
    model.to(args.device)

    # Resume training from the previously saved checkpoint
    if args.useGPU:
        checkpoint = torch.load(args.save_file)
    else:
        checkpoint = torch.load(args.save_file,
                                map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'])

    criterion = nn.MSELoss()  # define the loss function
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)  # Adam optimizer, learning rate = 0.001
    optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr)

    close_max, close_min, train_loader, test_loader = getData(
        args.corpusFile, args.sequence_length, args.batch_size)
    for i in range(args.epochs):
        total_loss = 0
        for idx, (data, label) in enumerate(train_loader):
            if args.useGPU:
                data1 = data.squeeze(1).cuda()
                pred = model(Variable(data1).cuda())
                # print(pred.shape)
                pred = pred[1, :, :]  # keep one (batch_size, 1) slice of the model output
                label = label.unsqueeze(1).cuda()  # reshape label to (batch_size, 1) to match pred
                # print(label.shape)
            else:
                data1 = data.squeeze(1)
                pred = model(Variable(data1))
                pred = pred[1, :, :]
                label = label.unsqueeze(1)
            loss = criterion(pred, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        if i % args.saveepochs == 0:
            # torch.save(model, args.save_file)
            torch.save({'state_dict': model.state_dict()}, args.save_file)
            print('Epoch %d: saving model' % i)
            print(total_loss)
    # torch.save(model, args.save_file)
    torch.save({'state_dict': model.state_dict()}, args.save_file)
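
This snippet (and the eval() variants below) reads every hyperparameter from a module-level `args` object that is not shown. A minimal sketch of such a configuration, covering only the fields this code actually references (all values below are illustrative placeholders, not the original project's settings):

import torch
from argparse import Namespace

# Hypothetical configuration object; the field names come from the snippets,
# the values are placeholders.
args = Namespace(
    corpusFile='data/stock.csv',        # assumed path to the input CSV
    input_size=8,                       # number of features per time step
    hidden_size=32,
    layers=2,
    dropout=0.1,
    batch_first=True,
    sequence_length=5,
    batch_size=64,
    lr=0.001,
    epochs=100,
    saveepochs=10,
    useGPU=torch.cuda.is_available(),
    save_file='model/stock.pkl',        # assumed checkpoint path
)
args.device = 'cuda' if args.useGPU else 'cpu'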
Example #2
def eval():
    # model = torch.load(args.save_file)
    model = lstm(input_size=args.input_size,
                 hidden_size=args.hidden_size,
                 num_layers=args.layers,
                 output_size=1)
    model.to(args.device)
    if args.useGPU:
        checkpoint = torch.load(args.save_file)
    else:
        checkpoint = torch.load(args.save_file,
                                map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'])
    preds = []
    labels = []
    close_max, close_min, train_loader, test_loader = getData(
        args.corpusFile, args.sequence_length, args.batch_size)
    # for idx, (x, label) in enumerate(test_loader):
    for idx, (x, label) in enumerate(train_loader):
        if args.useGPU:
            x = x.squeeze(1).cuda()  # batch_size,seq_len,input_size
        else:
            x = x.squeeze(1)
        pred = model(x)
        pred_list = pred.data.squeeze(1).tolist()  # avoid shadowing the built-in list
        preds.extend(pred_list[-1])
        labels.extend(label.tolist())
    # print(preds)
    count = 0
    acc = 0
    for i in range(len(preds) - 1):
        # print('Predicted %.2f, actual %.2f' % (
        # preds[i][0] * (close_max - close_min) + close_min, labels[i][0] * (close_max - close_min) + close_min))
        # print("%.2f" % preds[i][0], end=", ")
        if preds[i][0] > 0:
            print("predicted up", end=", ")
        else:
            print("predicted down", end=", ")
        if labels[i] == 1.0:
            print("actually went up")
        else:
            print("actually went down")
        if ((preds[i][0] >= 0 and labels[i] == 1.0)
                or (preds[i][0] <= 0 and labels[i] == -1.0)):
            acc += 1
        count += 1
    print(acc, count)
    print("实际预测准确率{0}%".format(acc * 1.0 / count * 100))
Example #3
def eval():
    # model = torch.load(args.save_file)
    model = lstm(input_size=args.input_size,
                 hidden_size=args.hidden_size,
                 num_layers=args.layers,
                 output_size=1)
    model.to(args.device)
    checkpoint = torch.load(args.save_file)
    model.load_state_dict(checkpoint['state_dict'])
    preds = []
    labels = []
    close_max, close_min, train_loader, test_loader = getData(args.corpusFile, args.sequence_length, args.batch_size)
    for idx, (x, label) in enumerate(test_loader):
        if args.useGPU:
            x = x.squeeze(1).cuda()  # batch_size,seq_len,input_size
        else:
            x = x.squeeze(1)
        pred = model(x)
        pred_list = pred.data.squeeze(1).tolist()  # avoid shadowing the built-in list
        preds.extend(pred_list[-1])
        labels.extend(label.tolist())

    for i in range(len(preds)):
        print('Predicted value: %.2f, actual value: %.2f' % (
            preds[i][0] * (close_max - close_min) + close_min,
            labels[i] * (close_max - close_min) + close_min))
def eval():
    # model = torch.load(args.save_file)
    model = lstm(input_size=args.input_size,
                 hidden_size=args.hidden_size,
                 num_layers=args.layers,
                 output_size=1)
    model.to(args.device)
    checkpoint = torch.load(args.save_file)
    model.load_state_dict(checkpoint['state_dict'])

    criterion = nn.MSELoss()  # define the loss function
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)  # Adam optimizer, learning rate = 0.001

    preds = []
    labels = []
    close_max, close_min, train_loader, test_loader = getData(args.corpusFile, args.sequence_length, args.batch_size)
    for idx, (x, label) in enumerate(test_loader):
        if args.useGPU:
            x = x.squeeze(1).cuda()  # batch_size,seq_len,input_size
        else:
            x = x.squeeze(1)
        pred = model(Variable(x))

        # Note: this variant also back-propagates on the evaluation batches,
        # i.e. the model keeps being updated while it is evaluated.
        loss = criterion(pred, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        pred_list = pred.data.squeeze(1).tolist()  # avoid shadowing the built-in list
        preds.extend(pred_list[-1])
        labels.extend(label.tolist())

    f = open('pred.csv', 'w', encoding='utf-8', newline='')
    csv_writer = csv.writer(f)

    for i in range(len(preds)):
        print('Predicted value: %.2f, actual value: %.2f' % (
            preds[i][0] * (close_max - close_min) + close_min,
            labels[i] * (close_max - close_min) + close_min))
        csv_writer.writerow([preds[i][0], labels[i]])


    f.close()
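
Both eval() variants map the model's normalised output back to price scale with the inverse of min-max scaling, on the assumption that getData scaled the close price as (x - close_min) / (close_max - close_min). A small helper expressing that inverse:

def denormalize(value, close_max, close_min):
    # Inverse of min-max scaling: value_norm = (value - close_min) / (close_max - close_min)
    return value * (close_max - close_min) + close_min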