Example #1
from torch.utils.data import DataLoader

def evaluate_dataset(net, dataset, batch_size=32):
    """Evaluate `net` on `dataset` and return (accuracy, average loss).

    Args:
        net: model to evaluate
        dataset: torch-style dataset yielding (inputs, labels) pairs;
            labels have shape (num_samples,) for binary labels, or
            (num_samples, num_classes) for multiclass labels
        batch_size: number of samples per evaluation batch
    """
    net.eval()
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    n_correct = 0
    loss_avg = 0
    for i, (inputs, labels) in enumerate(data_loader, start=1):
        # Convert torch tensors to the custom framework's tensor type
        inputs = _H.tensor(inputs.numpy())
        labels = _H.tensor(labels.numpy())

        output = net(inputs)
        loss = _H.CrossEntropyLoss(output, labels).item()
        loss_avg = (loss_avg * (i - 1) + loss) / i  # running mean of the batch losses
        pred = _np.argmax(output.data, axis=1)
        n_correct += (pred == labels.data).sum()
    net.train()  # restore training mode
    return n_correct / len(dataset), loss_avg
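For reference, a call could look like the sketch below; `net` and `val_data` are placeholders for a trained model and a torch-style dataset, not names defined in this example.

# Hypothetical usage of evaluate_dataset (illustrative placeholders):
acc, avg_loss = evaluate_dataset(net, val_data, batch_size=64)
print("accuracy: %.2f   loss: %.4f" % (acc, avg_loss))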
Example #2
from torch.utils.data import DataLoader

net = LeNetPlus()
optimizer = SGD(net.parameters(), lr=0.0003, momentum=0.9)

batch_size = 64
data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
epochs = 100
for epoch in range(epochs):
    print(epoch)
    for inputs, labels in data_loader:
        # Convert torch tensors to the custom framework's tensor type
        inputs = H.tensor(inputs.numpy())
        labels = H.tensor(labels.numpy())

        net.zero_grad()
        output = net(inputs)
        loss = H.CrossEntropyLoss(output, labels)
        loss.backward()
        optimizer.step()
    optimizer.reduce(0.95)  # decay the learning rate after each epoch
    print(evaluate_dataset(net, val_data2, 64))
    print(evaluate_dataset(net, val_data, 64))
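`optimizer.reduce` is not part of torch.optim's SGD, so `SGD` here is presumably a custom class. A minimal sketch of what such a decay hook could look like, assuming the optimizer stores a scalar learning rate; the class body is an assumption, not the actual implementation.

# Sketch of the assumed custom optimizer; step() and zero_grad() omitted.
class SGD:
    def __init__(self, params, lr=0.01, momentum=0.0):
        self.params = list(params)
        self.lr = lr
        self.momentum = momentum

    def reduce(self, factor):
        # Multiplicative decay, called once per epoch: lr <- lr * factor
        self.lr *= factor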

# import networkx as nx
# import matplotlib.pyplot as plt

# def draw(g, **kwargs):
#   fig, ax = plt.subplots(1, 1, figsize=(8, 8))
#   pos = nx.spring_layout(g, k=2, **kwargs)
#   labels = dict(g.nodes('name'))
#   nx.draw(g, labels=labels, pos=pos, ax=ax,
#           font_size=12, node_color='skyblue')
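Uncommented and made self-contained, the plotting helper needs both imports plus a graph whose nodes carry a 'name' attribute; the karate-club graph and the attribute setup below are illustrative assumptions.

import networkx as nx
import matplotlib.pyplot as plt

def draw(g, **kwargs):
    # Spring layout, with each node labeled by its 'name' attribute
    fig, ax = plt.subplots(1, 1, figsize=(8, 8))
    pos = nx.spring_layout(g, k=2, **kwargs)
    labels = dict(g.nodes('name'))
    nx.draw(g, labels=labels, pos=pos, ax=ax,
            font_size=12, node_color='skyblue')

g = nx.karate_club_graph()
nx.set_node_attributes(g, {n: str(n) for n in g}, 'name')
draw(g, seed=0)
plt.show()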
Example #3
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

iris = load_iris()
X = iris.data
y = iris.target

# Scale features to zero mean and unit variance (helper defined elsewhere)
x, mean, std = standardize(X)

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33)

# Layer spec: 4 inputs -> bn -> relu -> 10 hidden units -> bn -> relu -> 3 classes
net = MLP([4, 'bn', 'relu', 10, 'bn', 'relu', 3])
optimizer = SGD(net.parameters(), lr=0.03, momentum=0.9)

epochs = 50
for epoch in range(epochs):
    # Split the training data into mini-batches of 8 (helper defined elsewhere)
    batches = split(x_train, y_train, 8)
    for inputs, target in batches:
        inputs = H.tensor(inputs)

        net.zero_grad()
        output = net(inputs)
        loss = H.CrossEntropyLoss(output, target)
        loss.backward()
        optimizer.step()
train_acc, train_loss = evaluate(net, x_train, y_train)
test_acc, test_loss = evaluate(net, x_test, y_test)
print("%d Epochs." % epochs)
print("Training set:")
print("Loss: %.4f   Accuracy: %.2f" % (train_loss, train_acc))
print("Test set:")
print("Loss: %.4f   Accuracy: %.2f" % (test_loss, test_acc))