Example #1
0
def nn_test(B, label, train_num):
    """Build a small feed-forward classifier for features ``B`` and train it.

    The hidden layer is four times as wide as the input; the output width
    matches the number of label columns.  Training itself is delegated to the
    project-level ``fit`` helper.

    NOTE(review): ``nm`` is presumably an alias for ``torch.nn`` — confirm at
    the import site.
    """
    feats = torch.tensor(B, dtype=torch.float32)
    n_in = feats.shape[1]
    model = Net(n_in, 4 * n_in, label.shape[1])
    criterion = nm.CrossEntropyLoss()
    opt = torch.optim.Adam(model.parameters(), lr=0.01)
    targets = torch.tensor(label)
    fit(model, feats, targets, train_num, criterion, opt)
    return
Example #2
0
def train_validate(model, train_loader, epoch):
    """Run one training epoch of ``model`` over ``train_loader``, then save it.

    Fixes vs. original: the passed-in ``model`` and ``train_loader`` are now
    actually used (the original silently discarded both arguments, reloaded
    the data and rebuilt a fresh VGG16); the epoch summary is printed once
    after the loop instead of on every batch; unused locals and the
    deprecated ``Variable`` wrappers were removed.

    Args:
        model: network to train; must expose ``.parameters()``.
        train_loader: DataLoader yielding ``(image, label)`` batches; its
            ``.dataset`` length is used to average loss/accuracy.
        epoch: zero-based epoch index, used only for logging.

    Side effects: writes the trained weights to ``./cnn.pth``.
    """
    learning_rate = 1e-2
    model.train()
    # loss and optimizer
    criterion = modules.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)

    # move the model to GPU when one is available
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        print("use gpu")
        model = model.cuda()

    print('*' * 25, 'epoch {}'.format(epoch + 1), '*' * 25)
    running_loss = 0.0
    running_acc = 0.0
    n_samples = len(train_loader.dataset)  # total samples, for averaging
    for i, data in tqdm(enumerate(train_loader, 1)):
        img, label = data
        if use_gpu:
            img = img.cuda()
            label = label.cuda()
        # forward pass
        out = model(img)
        loss = criterion(out, label)
        running_loss += loss.item() * label.size(0)
        _, pred = torch.max(out, 1)  # index of the max logit = predicted class
        running_acc += (pred == label).sum().item()
        # backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # summary for the finished epoch (original printed this every batch)
    print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
        epoch + 1, running_loss / n_samples, running_acc / n_samples))
    # save the trained weights (pt/pth/pkl all work; .pth by convention)
    torch.save(model.state_dict(), "./cnn.pth")
Example #3
0
    test_dataset = datasets.CIFAR10("I:\datasets",
                                    train=False,
                                    transform=transforms.ToTensor(),
                                    download=True)
    test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
    return train_dataset, train_loader, test_dataset, test_loader


'''超参数'''
# Hyperparameters ("超参数").  NOTE(review): batch_size and num_epoches are
# not referenced anywhere in this chunk — presumably consumed elsewhere.
batch_size = 256
learning_rate = 1e-2
num_epoches = 10
# build the model (VGG16 is defined elsewhere in the project)
model = VGG16()
'''定义loss 和optimizer'''
# loss and optimizer ("定义loss 和optimizer").  `modules` is presumably
# torch.nn imported under that alias — confirm at the import site.
criterion = modules.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)


def train_validate(model, train_loader, epoch):
    '''超参数'''
    batch_size = 256
    learning_rate = 1e-2
    num_epoches = 10
    '''加载数据集'''
    train_dataset, train_loader, test_dataset, test_loader = load_data()
    # 创建模型
    model = VGG16()
    model.train()
    '''定义loss 和optimizer'''
    criterion = modules.CrossEntropyLoss()
Example #4
0
def train(gpu=True):
    """Train MyTransformer on the EN->ZH parallel corpus and save checkpoints.

    Fixes vs. original: the per-epoch loss is accumulated as a plain float
    via ``loss.item()`` (the original summed tensors, so the epoch log
    printed a tensor repr); the builtin-shadowing local ``input`` was
    renamed; leftover per-batch debug prints were removed.

    Args:
        gpu: when True, move the model, batches and masks to CUDA.

    Side effects: creates ``model_save/``, writes ``log.txt`` via Logger,
    and saves model files under ``model_save/``.
    """
    if not os.path.exists("model_save"):
        os.mkdir("model_save")
    MAX_STEPS = 2000  # NOTE(review): unused here — kept for parity

    batch_size = 16
    n_head = 5
    seq_len = 100

    train_iter, val_iter = create_dict(batch_size)
    en_vocb_size = len(EN_TEXT.vocab)
    zh_vocb_size = len(ZH_TEXT.vocab)
    print("EN-Vocab size:{}".format(en_vocb_size))
    print("ZH-Vocab size:{}".format(zh_vocb_size))

    model = MyTransformer(dict_size_1=en_vocb_size,
                          dict_size_2=zh_vocb_size,
                          embedding_dim=100,
                          nhead=n_head)
    if gpu:
        model = model.cuda()

    loss_fn = nn.CrossEntropyLoss()
    opt = torch.optim.Adam(model.parameters(), lr=0.0001)

    logger = Logger("log.txt", print_flag=True)
    EPOCH = 1
    for epoch in range(EPOCH):
        loss_epoch = 0.0
        counter = 0

        for batch in train_iter:
            counter += 1
            # batches arrive as (seq_len, batch_size); transpose to
            # (batch_size, seq_len)
            src = batch.en.permute(1, 0)
            tgt = batch.zh.permute(1, 0)
            # masks: padding mask for the source; for the target, the
            # element-wise max of the look-ahead and padding masks
            src_mask = create_padding_mask(src, n_head=n_head)
            tgt_behind_mask = create_behind_mask(batch_size, n_head, seq_len)
            tgt_padding_mask = create_padding_mask(tgt, n_head=n_head)
            tgt_mask = torch.max(tgt_behind_mask, tgt_padding_mask)

            if gpu:
                src = src.cuda()
                tgt = tgt.cuda()
                src_mask = src_mask.cuda()
                tgt_mask = tgt_mask.cuda()

            # forward pass; output is (B, L, E)
            output = model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
            # flatten to (B*L, vocab) logits vs (B*L,) targets
            tgt = tgt.reshape(batch_size * seq_len, -1).squeeze()
            output = output.reshape(batch_size * seq_len, -1)

            opt.zero_grad()
            loss = loss_fn(output, tgt)
            loss.backward()
            # update parameters
            opt.step()
            # accumulate a plain float so the epoch log prints a number,
            # not a tensor repr
            loss_epoch += loss.item()

        logger.log("epoch:{} | loss:{}".format(epoch, loss_epoch / counter))
        # periodic checkpoint (never fires while EPOCH == 1)
        if epoch % 10 == 0 and epoch != 0:
            torch.save(model, "model_save/model_{}.model".format(epoch))
    torch.save(model, "model_save/model_final.model")

    print("save model to model_save")