Example #1
        # Forward pass
        outputs = model(words)
        # if y were one-hot encoded, we would first have to convert it to class
        # indices, e.g. labels = torch.max(labels, 1)[1] (see the sketch after
        # this example)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if (epoch + 1) % 100 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

print(f'final loss: {loss.item():.4f}')

data = {
    "model_state": model.state_dict(),
    "input_size": input_size,
    "hidden_size": hidden_size,
    "output_size": output_size,
    "all_words": all_words,
    "tags": tags
}

FILE = "data.pth"
torch.save(data, FILE)

print(f'training complete. file saved to {FILE}')
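The commented hint above is worth spelling out: nn.CrossEntropyLoss expects integer class indices, so one-hot targets have to be converted first. A minimal standalone sketch (the tensors are made-up illustrations, not data from this example):

import torch

# hypothetical one-hot targets: a batch of 4 samples over 3 classes
one_hot = torch.tensor([[0., 1., 0.],
                        [1., 0., 0.],
                        [0., 0., 1.],
                        [0., 1., 0.]])

labels = torch.max(one_hot, 1)[1]   # tensor([1, 0, 2, 1]), as in the hint above
labels = one_hot.argmax(dim=1)      # equivalent and a bit more readable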
Example #2
for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        words = words.to(device)
        labels = labels.to(device)

        output = model(words)
        loss = criterion(output, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if (epoch + 1) % 100 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

print(f'Final loss: {loss.item():.4f}')

data = {
    'model_state': model.state_dict(),
    'input_size': input_size,
    'output_size': output_size,
    'hidden_size': hidden_size,
    'all_words': all_words,
    'tags': tags
}

FILE = 'bible2.pth'
torch.save(data, FILE)

print(f'training done. file saved to {FILE}')
Example #3
        # backward pass and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 10 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                epoch + 1, num_epochs, i + 1, total_step, loss.item()))
    print('Epoch [{}/{}], Accuracy: {:.4f}'.format(
        epoch + 1, num_epochs, 100 * correct_predictions / total_predictions))

# TEST
with torch.no_grad():
    correct = 0
    total = 0
    for data_batch in test_loader:
        vis_feats = data_batch['vis_feats'].to(device)
        labels = data_batch['labels'].to(device)

        outputs = model(vis_feats.float())
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
        # TODO: print a confusion matrix (see the sketch after this example)

    print('Accuracy over entire test set: {} %'.format(100 * correct / total))

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
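The TODO above asks for a confusion matrix. A minimal sketch that accumulates one over the same test loader, assuming the class count is output_size and that model, test_loader and device are the objects from this example:

num_classes = output_size                        # assumed number of classes
confusion = torch.zeros(num_classes, num_classes, dtype=torch.long)

with torch.no_grad():
    for data_batch in test_loader:
        vis_feats = data_batch['vis_feats'].to(device)
        labels = data_batch['labels'].to(device)
        predicted = model(vis_feats.float()).argmax(dim=1)
        # rows index the true class, columns the predicted class
        for t, p in zip(labels.view(-1), predicted.view(-1)):
            confusion[t.long(), p.long()] += 1

print(confusion)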
Example #4
        log(
            f"TRAIN LOSS: {train_loss:0.3f} " + "".join([
                    f" acc@{k}: {acc:0.3f}"
                    for k, acc in zip(top_k, train_kacc)
                ]), logfile)

        # TEST
        with torch.no_grad():
            valid_loss, valid_kacc = process(model, valid_data, top_k,
                                             criterion, None)
        log(
            f"VALID LOSS: {valid_loss:0.3f} " + "".join(
                [f" acc@{k}: {acc:0.3f}"
                 for k, acc in zip(top_k, valid_kacc)]), logfile)

        if valid_loss < best_loss:
            plateau_count = 0
            best_loss = valid_loss
            torch.save(model.state_dict(),
                       os.path.join(running_dir, 'best_params.pkl'))
            log(f"  best model so far", logfile)
        else:
            plateau_count += 1
            if plateau_count % early_stopping == 0:
                log(
                    f"  {plateau_count} epochs without improvement, early stopping",
                    logfile)
                break
            if plateau_count % patience == 0:
                lr_scheduler.step()
                log(
                    f"  {plateau_count} epochs without improvement, decreasing learning rate to {lr_scheduler.get_lr()}",
                    logfile)
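The manual plateau counter above can also be expressed with PyTorch's built-in ReduceLROnPlateau scheduler; this covers the learning-rate decay branch only, not the early-stopping break. A rough sketch, assuming the optimizer, patience, process, model, valid_data, top_k and criterion names from the snippet are in scope and using a placeholder epoch loop:

scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='min', factor=0.1, patience=patience)

for epoch in range(max_epochs):      # max_epochs is a placeholder name
    # ... run one training epoch here ...
    with torch.no_grad():
        valid_loss, valid_kacc = process(model, valid_data, top_k, criterion, None)
    scheduler.step(valid_loss)       # lowers the LR once valid_loss stops improving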
Example #5
        loss = criterion(outputs, label)

        # backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
    if (epoch + 1) % 100 == 0:
        print(f'epoch {epoch+1}/{numEpoch}, loss = {loss.item():.4f}')

# print the final loss
print(f'final loss = {loss.item():.4f}')

# save the model
data = {
    "modelState": model.state_dict(),
    "input_size": inputSize,
    "output_size": outputSize,
    "hidden_size": hiddenSize,
    "allWords_size": allWords,
    "tags": tags,
}

FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')


'''
# listen to the voice and convert to text
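Several of these snippets (Examples #1, #5 and #6 below) call tokenize, stem and bag_of_words helpers that are not shown. A typical NLTK-based implementation, given as an assumption rather than the exact code behind these examples:

import numpy as np
import nltk
from nltk.stem.porter import PorterStemmer

# nltk.download('punkt')   # needed once before word_tokenize can run
stemmer = PorterStemmer()

def tokenize(sentence):
    # split a sentence into word / punctuation tokens
    return nltk.word_tokenize(sentence)

def stem(word):
    # lower-case a word and reduce it to its stem ("organizes" -> "organ")
    return stemmer.stem(word.lower())

def bag_of_words(tokenized_sentence, all_words):
    # 1.0 at every vocabulary position whose word occurs in the sentence
    sentence_words = [stem(w) for w in tokenized_sentence]
    bag = np.zeros(len(all_words), dtype=np.float32)
    for idx, w in enumerate(all_words):
        if w in sentence_words:
            bag[idx] = 1.0
    return bag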
Example #6
def main(read_dir="high.json", write_dir="high.pth"):

    base_json_dir = "resource/jsonFile/" + read_dir
    base_pth_dir = "resource/pthFile/" + write_dir

    with open(base_json_dir, "r", encoding="UTF-8") as file:
        intents = json.load(file)

    all_words = []
    tags = []
    xy = []

    for intent in intents['intents']:
        tag = intent['tag']
        tags.append(tag)
        for pattern in intent['patterns']:
            w = tokenize(pattern)
            all_words.extend(w)
            xy.append((w, tag))

    ignore_word = [",", ".", "'", '"', "?", "!", "^", "@", "#", "_", "-",
                   "~"]  # a regular expression would be a cleaner way to strip these
    all_words = [stem(w) for w in all_words
                 if w not in ignore_word]  # a list comprehension reads better than map here
    all_words = sorted(set(all_words))
    tags = sorted(set(tags))  # for order

    X_train = []
    Y_train = []
    for (pattern_sentence, tag) in xy:
        bag = bag_of_words(pattern_sentence, all_words)
        X_train.append(bag)

        label = tags.index(tag)
        Y_train.append(label)

    X_train = np.array(X_train)
    Y_train = np.array(Y_train)

    # Hyper-parameters
    num_epochs = 1000
    batch_size = 8
    learning_rate = 0.001
    input_size = len(X_train[0])
    hidden_size = 8
    output_size = len(tags)

    class ChatDataset(Dataset):
        def __init__(self):
            self.n_samples = len(X_train)
            self.x_data = X_train
            self.y_data = Y_train

        # support indexing such that dataset[i] can be used to get i-th sample
        def __getitem__(self, index):
            return self.x_data[index], self.y_data[index]

        # we can call len(dataset) to return the size
        def __len__(self):
            return self.n_samples

    dataset = ChatDataset()
    train_loader = DataLoader(dataset=dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=0)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = NeuralNet(input_size, hidden_size, output_size).to(device)

    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Train the model
    for epoch in range(num_epochs):
        for (words, labels) in train_loader:
            words = words.to(device)
            labels = labels.to(dtype=torch.long).to(device)

            # Forward pass
            outputs = model(words)
            # if y were one-hot encoded, we would first have to convert it to
            # class indices, e.g. labels = torch.max(labels, 1)[1]
            loss = criterion(outputs, labels)

            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        if (epoch + 1) % 100 == 0:
            print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

    print(f'final loss: {loss.item():.4f}')

    data = {
        "model_state": model.state_dict(),
        "input_size": input_size,
        "hidden_size": hidden_size,
        "output_size": output_size,
        "all_words": all_words,
        "tags": tags
    }

    torch.save(data, base_pth_dir)

    print(f'training complete. file saved to {base_pth_dir}')
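Once main() has written the .pth file, the chat side reloads it to rebuild the network and its vocabulary. A minimal inference sketch, assuming NeuralNet, tokenize and bag_of_words from this snippet are importable and that bag_of_words returns a NumPy float array:

data = torch.load("resource/pthFile/high.pth")   # the default write target of main()

model = NeuralNet(data["input_size"], data["hidden_size"], data["output_size"])
model.load_state_dict(data["model_state"])
model.eval()                                     # inference mode

all_words, tags = data["all_words"], data["tags"]

sentence = tokenize("hello, how are you?")       # made-up input sentence
bag = bag_of_words(sentence, all_words)
x = torch.from_numpy(bag).float().unsqueeze(0)   # shape (1, input_size)
with torch.no_grad():
    tag = tags[model(x).argmax(dim=1).item()]
print(tag)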
Example #7
    for idx, (inputs, labels) in enumerate(train_loader):
        inputs = inputs.reshape(-1, 28 * 28).to(device)
        labels = labels.to(device)
        # Forward propagation
        outputs = model(inputs)
        loss = loss_fn(outputs, labels)
        # backward pass and make step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss.append(loss.item())
        cnt += 1
        epoch_counter.append(cnt)
        if (idx + 1) % 100 == 0:
            print("Epoch: {}/{}, Step: {}/{}, Loss: {:.4f}".format(
                epoch + 1, num_epochs, idx + 1, num_batches, loss.item()))
plt.plot(epoch_counter, train_loss)
torch.save(model.state_dict(), 'model.pth')
torch.save(optimizer.state_dict(), 'optimizer.pth')
with torch.no_grad():
    correct = 0
    total = 0
    for idx, (inputs, labels) in enumerate(test_loader):
        inputs = inputs.reshape(-1, 28 * 28).to(device)
        labels = labels.to(device)
        preds = model(inputs)
        values, indices = torch.max(preds, 1)
        total += labels.shape[0]
        correct += (labels == indices).sum().item()
    print("Accuracy of the network is: {}%".format(100 * correct / total))
Example #8
def train(args, labeled, resume_from, ckpt_file):
    print("========== In the train step ==========")
    batch_size = args["batch_size"]
    lr = args["learning_rate"]
    momentum = args["momentum"]
    epochs = args["train_epochs"]

    train_split = args["split_train"]

    CSV_FILE = "./data/mushrooms.csv"
    dataset = MushroomDataset(CSV_FILE)

    train_dataset = torch.utils.data.Subset(
        dataset, list(range(int(train_split * len(dataset))))
    )

    train_subset = Subset(train_dataset, labeled)

    train_loader = DataLoader(train_subset, batch_size=batch_size, shuffle=True)

    net = NeuralNet()
    net = net.to(device=device)

    criterion = torch.nn.BCELoss()
    optimizer = optim.SGD(net.parameters(), lr=float(lr), momentum=momentum)

    if resume_from is not None:
        ckpt = torch.load(os.path.join(args["EXPT_DIR"], resume_from + ".pth"))
        net.load_state_dict(ckpt["model"])
        optimizer.load_state_dict(ckpt["optimizer"])
    else:
        getdatasetstate(args)

    net.train()

    for epoch in tqdm(range(epochs), desc="Training"):

        running_loss = 0

        for i, batch in enumerate(train_loader, start=0):
            data, labels = batch

            data = data.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            output = net(data)
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            if (i + 1) % 1000 == 0:
                print(
                    "epoch: {} batch: {} running-loss: {}".format(
                        epoch + 1, i + 1, running_loss / 1000
                    ),
                    end="\r",
                )
                running_loss = 0

    print("Finished Training. Saving the model as {}".format(ckpt_file))

    ckpt = {"model": net.state_dict(), "optimizer": optimizer.state_dict()}
    torch.save(ckpt, os.path.join(args["EXPT_DIR"], ckpt_file + ".pth"))

    return
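For context, a hypothetical call sequence for this train() function; the hyperparameter values are made up and only follow the keys the function actually reads:

args = {
    "batch_size": 16,
    "learning_rate": 0.01,
    "momentum": 0.9,
    "train_epochs": 5,
    "split_train": 0.8,
    "EXPT_DIR": "./experiments",
}
labeled = list(range(100))          # indices of the currently labelled training rows

# first round: train from scratch, checkpoint goes to <EXPT_DIR>/ckpt_0.pth
train(args, labeled, resume_from=None, ckpt_file="ckpt_0")

# later round: warm-start from the previous checkpoint
train(args, labeled, resume_from="ckpt_0", ckpt_file="ckpt_1")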