    for X, Y in data_loader:  # mini-batch loop (X: images, Y: labels)
        X = X.to(device)
        Y = Y.to(device)

        hypothesis = model(X)
        cost = criterion(hypothesis, Y)

        optimizer.zero_grad()
        cost.backward()
        optimizer.step()

        avg_cost += cost / total_batch  # accumulate the mean cost over this epoch's batches

    accuracy = mu.get_cross_entropy_accuracy(hypothesis, Y)  # accuracy on the last mini-batch
    mu.log_epoch(epoch, training_epochs, avg_cost, accuracy)

mu.plt_show()

mu.log("model", model)

################################################################################
# - Now let's test the model.
# - We get 98% accuracy. In the next chapter we will stack more layers.

# We are not training here, so wrap the evaluation in torch.no_grad()
with torch.no_grad():
    X_test = mnist_test.test_data.view(len(mnist_test), 1, 28,
                                       28).float().to(device)
    Y_test = mnist_test.test_labels.to(device)

    prediction = model(X_test)
    accuracy = mu.get_cross_entropy_accuracy(prediction, Y_test)
    mu.log("accuracy", accuracy)
################################################################################
# Example #2
# - Multivariable linear regression: one separate weight per input feature (w1, w2, w3).

for epoch in range(nb_epochs + 1):
    # compute H(x)
    hypothesis = x1_train * w1 + x2_train * w2 + x3_train * w3 + b

    # compute the cost (mean squared error)
    cost = torch.mean((hypothesis - y_train)**2)

    # compute the accuracy
    accuracy = mu.get_regression_accuracy(hypothesis, y_train)

    # improve H(x) using the cost
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()

    # log every 100 epochs
    if epoch % 100 == 0:
        mu.log_epoch(epoch, nb_epochs, cost, accuracy)

mu.plt_show()
mu.log("w1", w1)
mu.log("w2", w2)
mu.log("w3", w3)
mu.log("b", b)

################################################################################
# Converting to vector and matrix operations

x_train = torch.FloatTensor([[73, 80, 75], [93, 88, 93], [89, 91, 90],
                             [96, 98, 100], [73, 66, 70]])  # (5, 3): 5 samples, 3 features each

y_train = torch.FloatTensor([[152], [185], [180], [196], [142]])  # (5, 1): one target per sample
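
# The idea of this section: the separate weights w1, w2, w3 from example #2 can be
# collapsed into a single weight matrix, so the hypothesis for every sample is one
# matrix multiplication. A minimal sketch (W and b below are illustrative,
# untrained parameters, not values produced by this tutorial):
W = torch.zeros((3, 1))             # one weight per input feature
b = torch.zeros(1)
hypothesis = x_train.matmul(W) + b  # (5, 3) @ (3, 1) -> (5, 1), same shape as y_train
# nn.Linear(3, 1), used below, performs the same x @ W^T + b computation internally.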

model = nn.Linear(3, 1)  # one linear layer: 3 input features -> 1 predicted value
loss_fn = nn.MSELoss()   # mean squared error, as in the example above
optimizer = optim.Adam(model.parameters())
nb_epochs = 100
mu.plt_init()

for epoch in range(nb_epochs + 1):
    y_pred = model(x_train)          # H(x) for all 5 samples at once
    loss = loss_fn(y_pred, y_train)  # mean squared error against the targets
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 10 == 0:
        accuracy = mu.get_regression_accuracy(y_pred, y_train)
        mu.log_epoch(epoch, nb_epochs, loss, accuracy)

mu.plt_show()

mu.log("model", model)

################################################################################
# Measuring accuracy

with torch.no_grad():
    prediction = model(x_train)
    accuracy = mu.get_regression_accuracy(prediction, y_train)
    mu.log("accuracy", accuracy)

################################################################################
# Testing 5 random items