Example #1
import torch


def train(X, y, epochs=10, batch_size=16):
    # Wrap the raw arrays in a Dataset and build a shuffling DataLoader.
    dataset = IrisDataset(X, y)
    num_examples = len(dataset)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=batch_size,
                                         shuffle=True)

    # Model, optimizer, and loss for multi-class classification.
    model = IrisNet(input_dim=X.shape[1])
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    criterion = torch.nn.CrossEntropyLoss()

    for epoch in range(1, epochs + 1):
        num_correct = 0
        for inputs, labels in loader:
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            num_correct += (labels == outputs.argmax(1)).sum().item()
            loss.backward()
            optimizer.step()
        print(f"Finished epoch {epoch}, accuracy: {100 * num_correct / num_examples:.2f}%")

    return model
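
Both examples rely on an IrisNet model and an IrisDataset wrapper defined elsewhere. A minimal sketch of what those classes might look like for Example #1, plus a call to train on the scikit-learn Iris data; the class bodies and hyperparameters here are assumptions, not the original definitions.

import torch
from torch import nn
from sklearn.datasets import load_iris


class IrisDataset(torch.utils.data.Dataset):
    """Hypothetical wrapper: exposes feature/label arrays as tensors."""
    def __init__(self, X, y):
        self.X = torch.as_tensor(X, dtype=torch.float32)
        self.y = torch.as_tensor(y, dtype=torch.long)

    def __len__(self):
        return len(self.y)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]


class IrisNet(nn.Module):
    """Hypothetical small feed-forward classifier returning raw logits."""
    def __init__(self, input_dim=4, hidden_dim=16, num_classes=3):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(input_dim, hidden_dim),
                                 nn.ReLU(),
                                 nn.Linear(hidden_dim, num_classes))

    def forward(self, x):
        return self.net(x)


# Usage: load the Iris data and train for a few epochs.
X, y = load_iris(return_X_y=True)
model = train(X, y, epochs=20, batch_size=16)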
Example #2
from torch import nn
from torch.optim import Adam
from torch.utils.data import DataLoader


def run(device):
    net = IrisNet()
    net.to(device)

    # `args` is assumed to be a module-level argparse.Namespace
    # providing lr, dpath, batch_size, and epochs.
    optimizer = Adam(net.parameters(), lr=args.lr)
    criterion = nn.CrossEntropyLoss()
    train_set = IrisDataset(args.dpath)
    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size,
                              shuffle=True)

    losses = []
    accuracy = []
    # Begin training
    for ep in range(args.epochs):
        ep_loss = 0
        ep_acc = 0
        for inputs, labels in train_loader:
            inputs = inputs.float().to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            # This IrisNet variant returns both raw logits and softmax probabilities.
            outputs, probs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Accumulate loss weighted by batch size and count correct predictions.
            ep_loss += loss.item() * len(inputs)
            ep_acc += (probs.argmax(dim=-1) == labels).sum().item()
        losses.append(ep_loss / len(train_set))
        accuracy.append(ep_acc / len(train_set))

    return net, losses, accuracy
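
Example #2 reads its hyperparameters from a module-level args namespace and tracks per-epoch loss and accuracy. A possible driver, with the argument names taken from the snippet above; the defaults, the data path, and the entry-point wiring are assumptions.

import argparse

import torch

# Hypothetical argument parsing matching the names used in run()
# (lr, dpath, batch_size, epochs).
parser = argparse.ArgumentParser()
parser.add_argument("--lr", type=float, default=0.01)
parser.add_argument("--dpath", type=str, default="iris.csv")
parser.add_argument("--batch-size", type=int, default=16)
parser.add_argument("--epochs", type=int, default=50)
args = parser.parse_args()

if __name__ == "__main__":
    # Pick a GPU if available, otherwise fall back to the CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net, losses, accuracy = run(device)
    print(f"final loss {losses[-1]:.4f}, final accuracy {accuracy[-1]:.2%}")

Note that this version of IrisNet must return a pair (logits, probabilities) from its forward pass, and IrisDataset must accept a path rather than in-memory arrays, so the sketches for Example #1 would need small adjustments to fit here.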