# ----------------------------------------------------------------------
# Train/test split. Simple positional slicing: the last `test_size` rows
# of the first `batch_size` rows become the test set.
# NOTE(review): assumes the data was shuffled upstream — confirm,
# otherwise this split is not random.
# ----------------------------------------------------------------------
cat_train = cats[:batch_size - test_size]
cat_test = cats[batch_size - test_size:batch_size]
con_train = conts[:batch_size - test_size]
con_test = conts[batch_size - test_size:batch_size]
y_train = y[:batch_size - test_size]
y_test = y[batch_size - test_size:batch_size]

# Neural Networky stuff
# NOTE(review): `selfembeds` is never used below — TabularModel builds its
# own embeddings from emb_szs. Kept in case later (unseen) code references
# it; safe to keep since the seed is set *after* this line.
selfembeds = nn.ModuleList([nn.Embedding(ni, nf) for ni, nf in emb_szs])

torch.manual_seed(33)  # reproducible weight initialization
model = TabularModel(emb_szs, conts.shape[1], 1, [200, 100], p=0.4)

criterion = nn.MSELoss()  # we'll convert this to RMSE later
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

start_time = time.time()

epochs = 300
losses = []  # per-epoch RMSE as plain floats (for plotting later)

for i in range(1, epochs + 1):
    # Full-batch forward pass on the training split.
    y_pred = model(cat_train, con_train)
    loss = torch.sqrt(criterion(y_pred, y_train))  # RMSE

    # .item() detaches the scalar from the autograd graph; appending the
    # tensor itself would keep every epoch's graph alive (memory leak).
    losses.append(loss.item())

    # a neat trick to save screen space:
    if i % 25 == 1:
        print(f'epoch: {i} loss: {loss.item()}')

    # BUG FIX: the original loop never backpropagated, so the model's
    # weights were never updated. Standard PyTorch update step:
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()