def main(x_data, y_data, num_iterations, model, is_logit=True):
    """Train `model` on (x_data, y_data) and report a simple accuracy.

    Args:
        x_data: input feature tensor, shape (N, num_features).
        y_data: targets. For the logistic path these must be integer class
            labels; for the regression path, float labels in {0., 1.}
            (the accuracy check thresholds predictions at 0.5).
        num_iterations: number of full-batch gradient steps.
        model: a torch.nn.Module. For is_logit=True it should emit raw
            class logits (CrossEntropyLoss applies softmax internally);
            for is_logit=False it should emit probabilities in [0, 1]
            suitable for binary cross-entropy.
        is_logit: selects the logistic (SGD + CrossEntropyLoss) path vs.
            the regression (Adam + BCE) path. Keyword with default, so
            existing callers are unaffected.

    Returns:
        None. Progress, accuracy, and learned parameters are printed.
    """
    if is_logit:
        # SGD at lr=0.01; CrossEntropyLoss computes softmax internally,
        # so the model must output raw logits.
        optim = torch.optim.SGD(model.parameters(), lr=0.01)
        criterion = torch.nn.CrossEntropyLoss()
    else:
        optim = torch.optim.Adam(model.parameters(), lr=0.01)

    for j in range(num_iterations):
        # Full-batch forward pass.
        if is_logit:
            # squeeze(-1) is a no-op for (N, C) logits with C > 1; it only
            # matters when the model emits a trailing singleton dim.
            y_pred = model(x_data).squeeze(-1)
            loss = criterion(y_pred, y_data)  # (outputs, labels)
        else:
            y_pred = model(x_data)
            # Fully-qualified call: only `torch` itself is assumed imported.
            loss = torch.nn.functional.binary_cross_entropy(
                input=y_pred.squeeze(-1), target=y_data)
        # Standard step: clear stale grads, backprop, update.
        optim.zero_grad()
        loss.backward()
        optim.step()
        if (j + 1) % 50 == 0:
            print("\n [iteration %04d] loss: %.4f " % (j + 1, loss.item()))

    if is_logit:
        # Accuracy = fraction of argmax predictions matching the labels.
        outputs = model(x_data)
        _, predicted = torch.max(outputs.data, 1)
        correct = (predicted == y_data).sum()
        print('Accuracy of logistic model: {} %'.format(
            100 * correct / y_data.size(0)))
    else:
        y_pred = model(x_data).squeeze(-1)
        # BUGFIX: original referenced undefined `y_data_float`; the float
        # targets are `y_data` itself on this path.
        error_mean = ((y_pred > 0.5).float() - y_data).abs().mean()
        print('Accuracy of regression model: {} %'.format(100. - 100. * error_mean))

    # Inspect learned parameters
    print("Learned parameters:")
    for name, param in model.named_parameters():
        print(name, param.data.numpy())
def main():
    """Fit `net` to the module-level `data` array by gradient descent.

    Relies on module-level globals: `data` (features in all columns but
    the last, target in the last column), `net`, `loss_fn`, `optim`, and
    `num_iterations`. Prints the loss every 50 steps and the learned
    parameters at the end.
    """
    # Split the last column off as the target.
    x_data, y_data = data[:, :-1], data[:, -1]

    for step in range(num_iterations):
        # Forward pass over the whole dataset, dropping the trailing
        # singleton output dimension to match the target's shape.
        prediction = net(x_data).squeeze(-1)
        loss = loss_fn(prediction, y_data)

        # Clear accumulated gradients, backpropagate, then update.
        optim.zero_grad()
        loss.backward()
        optim.step()

        if (step + 1) % 50 == 0:
            print("[iteration %04d] loss: %.4f" % (step + 1, loss.item()))

    # Inspect learned parameters
    print("Learned parameters:")
    for name, param in net.named_parameters():
        print(name, param.data.numpy())
def main():
    """Train `regression_model` on lagged sequences from `train`.

    Relies on module-level globals: `train` (2-D array whose first
    `n_lag` columns are inputs and remaining columns are targets),
    `n_lag`, `regression_model`, `loss_fn`, `optim`, `num_iterations`.
    Prints the tensor shapes up front, the loss every 100 steps, and the
    learned parameters at the end. Returns None.
    """
    # First n_lag columns are the input window, the rest the forecast target.
    X, y = train[:, 0:n_lag], train[:, n_lag:]
    # Insert a middle axis of size 1: (samples, 1, n_lag) — the layout
    # the model presumably expects (e.g. a single-channel/step input).
    X = X.reshape(X.shape[0], 1, X.shape[1])
    # FIX: dropped the redundant np.array(X), np.array(y) round-trip —
    # X and y already behave as arrays (slicing/.reshape above), and
    # torch.tensor copies its input regardless.
    x_data = torch.tensor(X, dtype=torch.float)
    y_data = torch.tensor(y, dtype=torch.float)
    print(x_data.shape)
    print(y_data.shape)

    for j in range(num_iterations):
        y_pred = regression_model(x_data).squeeze(-1)
        loss = loss_fn(y_pred, y_data)
        # Clear stale gradients, backpropagate, take a step.
        optim.zero_grad()
        loss.backward()
        optim.step()
        if (j + 1) % 100 == 0:
            print("[iteration %04d] loss: %.4f" % (j + 1, loss.item()))

    print("Learned parameters:")
    for name, param in regression_model.named_parameters():
        print(name, param.data.numpy())