Example #1
from torch.utils.data import DataLoader

def evaluate_dataset(net, dataset, batch_size=32):
    """Evaluate a model on a dataset.

    Args:
        net: model
        dataset: map-style dataset yielding (inputs, labels) pairs
        batch_size: mini-batch size used for evaluation

    Returns:
        (accuracy, average loss) over the whole dataset
    """
    net.eval()
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    n_correct = 0
    loss_avg = 0
    i = 0
    for batch in data_loader:
        i += 1
        inputs, labels = batch
        # convert the torch tensors from the DataLoader into horch tensors
        inputs = _H.tensor(inputs.numpy())
        labels = _H.tensor(labels.numpy())

        net.zero_grad()
        output = net(inputs)
        loss = _H.CrossEntropyLoss(output, labels).item()
        loss_avg = (loss_avg * (i - 1) + loss) / i  # running mean of batch losses
        pred = _np.argmax(output.data, axis=1)
        n_correct += (pred == labels.data).sum()
    net.train()
    return n_correct / len(dataset), loss_avg
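A minimal usage sketch (hypothetical; assumes `horch` is imported as `_H`, `numpy` as `_np`, and the `net`/`cifar_test` objects from Example #3 below):

acc, loss = evaluate_dataset(net, cifar_test, batch_size=64)
print("test_loss: %.4f   test_acc: %.2f" % (loss, acc))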
Example #2
def evaluate(net, x, y, binary=False, batch_size=64):
    """Evaluate a model on arrays, returning (accuracy, average loss).

    Args:
        net: model
        x: (num_samples, num_features)
        y: (num_samples,) for binary labels, or
           (num_samples, num_classes) for multiclass labels
        binary: if True, use BCE loss and sign-thresholded predictions
        batch_size: mini-batch size used for evaluation
    """
    net.eval()
    batches = split(x, y, batch_size)

    i = 0
    loss_avg = 0
    n_correct = 0
    for batch in batches:
        i += 1
        inputs, target = batch
        inputs = _H.tensor(inputs)
        target = _H.tensor(target)

        net.zero_grad()
        output = net(inputs)
        if binary:
            # threshold logits at 0: negative -> class 0, positive -> class 1
            pred = _np.sign(output.data)
            pred[pred == -1] = 0
            criterion = _H.BCELoss
        else:
            pred = _np.argmax(output.data, axis=1)
            criterion = _H.CrossEntropyLoss
        loss = criterion(output, target).item()
        loss_avg = (loss_avg * (i - 1) + loss) / i  # running mean of batch losses
        n_correct += (pred == target.data).sum()
    net.train()
    return n_correct / len(x), loss_avg
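`split` is never defined in these snippets. A plausible minimal implementation, assuming it simply slices the arrays into consecutive mini-batches:

def split(x, y, batch_size):
    # hypothetical helper: yield consecutive (x, y) mini-batches
    for start in range(0, len(x), batch_size):
        yield x[start:start + batch_size], y[start:start + batch_size]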
Example #3
# Subset expects a sequence of indices, so wrap the scalar m in np.arange
train_data = Subset(cifar_train, np.arange(m))          # first m training samples
val_data = Subset(cifar_train, np.arange(m, m + 200))   # 200 held-out samples
val_data2 = Subset(cifar_train, np.arange(m - 200, m))  # last 200 training samples, for comparison
test_data = cifar_test

net = LeNetPlus()
optimizer = SGD(net.parameters(), lr=0.0003, momentum=0.9)

batch_size = 64
data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
epochs = 100
for epoch in range(epochs):
  print(epoch)
  for i, batch in enumerate(data_loader):
    inputs, labels = batch
    inputs = H.tensor(inputs.numpy())
    labels = H.tensor(labels.numpy())

    net.zero_grad()
    output = net(inputs)
    loss = H.CrossEntropyLoss(output, labels)
    loss.backward()
    optimizer.step()
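  # (assumption) `reduce` appears to scale the learning rate by the given
  # factor, i.e. a 5% multiplicative decay per epoch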
  optimizer.reduce(0.95)
  print(evaluate_dataset(net, val_data2, 64))  # accuracy/loss on 200 seen training samples
  print(evaluate_dataset(net, val_data, 64))   # accuracy/loss on 200 held-out samples

Example #4
def uniform(low=0.0, high=1.0, size=(), requires_grad=False):
    data = _np.random.uniform(low, high, size)
    return _H.tensor(data, requires_grad=requires_grad)
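For instance, to draw a weight matrix from U(-0.1, 0.1) with gradients enabled (hypothetical usage):

w = uniform(-0.1, 0.1, size=(784, 100), requires_grad=True)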
Example #5
def normal(loc=0, scale=1, size=(), requires_grad=False):
    data = _np.random.normal(loc, scale, size)
    return _H.tensor(data, requires_grad=requires_grad)
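And likewise a Gaussian-initialized bias vector (hypothetical usage):

b = normal(0, 0.01, size=(100,), requires_grad=True)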
Example #6
iris = load_iris()
X = iris.data
y = iris.target

x, mean, std = standardize(X)

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33)

net = MLP([4, 'bn', 'relu', 10, 'bn', 'relu', 3])
optimizer = SGD(net.parameters(), lr=0.03, momentum=0.9)

epochs = 50
for epoch in range(epochs):
    batches = split(x_train, y_train, 8)
    for batch in batches:
        inputs, target = batch
        inputs = H.tensor(inputs)
        target = H.tensor(target)  # convert the target too, as in Examples #7 and #9

        net.zero_grad()
        output = net(inputs)
        loss = H.CrossEntropyLoss(output, target)
        loss.backward()
        optimizer.step()
train_acc, train_loss = evaluate(net, x_train, y_train)
test_acc, test_loss = evaluate(net, x_test, y_test)
print("%d Epochs." % epochs)
print("Training set:")
print("Loss: %.4f   Accuracy: %.2f" % (train_loss, train_acc))
print("Test set:")
print("Loss: %.4f   Accuracy: %.2f" % (test_loss, test_acc))
Example #7
bre = load_breast_cancer()
X = bre.data
y = bre.target

x = X  # no feature scaling here, unlike Example #6

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33)

net = MLP([30, 'bn', 'relu', 10, 'bn', 'relu', 1])
optimizer = SGD(net.parameters(), lr=0.01, momentum=0.9)

epochs = 20
for epoch in range(epochs):
    batches = split(x_train, y_train, 8)
    for batch in batches:
        inputs, target = batch
        inputs = H.tensor(inputs)
        target = H.tensor(target)

        net.zero_grad()
        output = net(inputs)
        loss = H.BCELoss(output, target)
        loss.backward()
        optimizer.step()
train_acc, train_loss = evaluate(net, x_train, y_train, binary=True)
test_acc, test_loss = evaluate(net, x_test, y_test, binary=True)
print("%d Epochs." % epochs)
print("Training set:")
print("Loss: %.4f   Accuracy: %.2f" % (train_loss, train_acc))
print("Test set:")
print("Loss: %.4f   Accuracy: %.2f" % (test_loss, test_acc))
Example #8
def gtensor(data):
    return horch.tensor(data, requires_grad=True)
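A hypothetical usage, assuming `horch` tensors support elementwise arithmetic, `.sum()`, and `backward()` as the other examples suggest:

w = gtensor([[1.0, 2.0], [3.0, 4.0]])
loss = (w * w).sum()  # any scalar function of w
loss.backward()       # populates w.grad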
Example #9
x_val = X[m_train:60000]
y_val = y[m_train:60000]
x_test = X[60000:]
y_test = y[60000:]

net = MLP([784, 'bn', 'relu', 100, 'bn', 'relu', 10])
optimizer = SGD(net.parameters(), lr=0.01, momentum=0.9)

batch_size = 32
epochs = 5
for epoch in range(epochs):
  print("Epoch %d" % (epoch + 1))
  batches = split(x_train, y_train, batch_size)
  for batch in batches:
    inputs, target = batch
    inputs = H.tensor(inputs)
    target = H.tensor(target)

    net.zero_grad()
    output = net(inputs)
    loss = H.CrossEntropyLoss(output, target)
    loss.backward()
    optimizer.step()
  val_acc, val_loss = evaluate(net, x_val, y_val)
  print("val_loss: %.4f   val_acc: %.2f" % (val_loss, val_acc))
train_acc, train_loss = evaluate(net, x_train, y_train)
test_acc, test_loss = evaluate(net, x_test, y_test)
print("%d Epochs." % epochs)
print("Training set:")
print("Loss: %.4f   Accuracy: %.2f" % (train_loss, train_acc))
print("Test set:")