# --- Evaluation / ensembling fragment (reconstructed from a whitespace-mangled line) ---
# NOTE(review): this is the interior of at least one enclosing loop (it uses `i`,
# `avg_mse`, `ct`, `output_mats` from outside this view). The line breaks and
# indentation below are a best-effort reconstruction; confirm against the
# original file's loop structure before relying on block boundaries.

# Indices of non-NaN target entries; loss is computed only on observed values.
loss_indices = np.where(~np.isnan(targets))
if use_gpu:
    targets = targets.cuda()
# Drop the warm-up prefix (first `begin_loss_ind` timesteps) from inputs/depths
# so they align with `pred`, which presumably was trimmed the same way upstream
# — TODO confirm.
inputs = inputs[:, begin_loss_ind:, :]
depths = depths[:, begin_loss_ind:]
# MSE over observed (non-NaN) entries only.
mse = mse_criterion(pred[loss_indices], targets[loss_indices])
# NOTE(review): `mse` looks like a torch tensor; accumulating it directly (rather
# than `mse.item()`) may retain graph/device memory — verify intent.
avg_mse += mse
if mse > 0:  # obsolete i think
    ct += 1
# NOTE(review): raises ZeroDivisionError if `ct` is 0 — presumably `ct` is always
# incremented at least once; verify against caller.
avg_mse = avg_mse / ct
# save model
# Reshape flat sequence predictions/labels into (depth x date) matrices for this
# test period.
(outputm_npy, labelm_npy) = parseMatricesFromSeqs(pred.cpu().numpy(), targets.cpu().numpy(), depths, tmp_dates, n_depths, n_test_dates_target, u_depths_target, unique_tst_dates_target)
# store output
# `i` is the index of the current ensemble member / outer-loop iteration —
# TODO confirm against the enclosing loop.
output_mats[i,:,:] = outputm_npy
if i == 0:
    # store label
    # Labels are identical across iterations, so they are captured once.
    label_mats = labelm_npy
# Per-member RMSE over observed entries only (NaN labels masked out).
loss_output = outputm_npy[~np.isnan(labelm_npy)]
loss_label = labelm_npy[~np.isnan(labelm_npy)]
mat_rmse = np.sqrt(((loss_output - loss_label) ** 2).mean())
# save model
# Ensemble prediction: mean across members (axis 0 of output_mats).
# NOTE(review): these last two statements likely belong AFTER the `i` loop ends
# — the mangled source does not show the loop boundary; confirm placement.
total_output_npy = np.average(output_mats,axis=0)
loss_output = total_output_npy[~np.isnan(label_mats)]
# --- Final forward pass + save/report fragment (reconstructed from a
# whitespace-mangled line) ---
# NOTE(review): interior of an enclosing trial/epoch loop (uses `epoch`, `trial`,
# `avg_mse`, `err_per_trial` from outside this view). Line breaks and indentation
# are a best-effort reconstruction; confirm against the original file.

# Fresh hidden state for this forward pass; hidden is re-initialized for the
# current batch size.
h_state = None
lstm_net.hidden = lstm_net.init_hidden(batch_size=inputs.size()[0])
pred, h_state = lstm_net(inputs, h_state)
# Flatten per-timestep outputs to (batch, seq_len) and drop the warm-up prefix.
pred = pred.view(pred.size()[0], -1)
pred = pred[:, begin_loss_ind:]
#calculate error
# Loss only over observed (non-NaN) target entries.
loss_indices = np.where(~np.isnan(targets))
inputs = inputs[:, begin_loss_ind:, :]
depths = depths[:, begin_loss_ind:]
mse = mse_criterion(pred[loss_indices], targets[loss_indices])
# NOTE(review): accumulating a torch tensor; `mse.item()` would avoid retaining
# the graph — verify intent before changing.
avg_mse += mse
#fill in data structs to save model outputs
# Reshape flat sequences into (depth x date) matrices for the test period.
# NOTE(review): unlike the companion evaluation code, `pred`/`targets` are passed
# here without `.cpu().numpy()` — presumably parseMatricesFromSeqs handles
# tensors, or this runs on CPU; verify.
(output_npy, label_npy) = parseMatricesFromSeqs(pred, targets, depths, \
    tmp_dates, n_depths, \
    n_test_dates, u_depths, \
    unique_tst_dates)
#save model
# Persist network + optimizer state for this trial.
saveModel(lstm_net.state_dict(), optimizer.state_dict(), save_path)
print("training finished in "+ str(epoch) + " epochs")
print("finished trial", trial)
# NOTE(review): `avg_mse` here is an accumulated sum — unclear from this view
# whether it was normalized by batch count before this point; the printed value
# may not be a true RMSE. Verify upstream.
print("rmse=", np.sqrt(avg_mse))
# Overall matrix RMSE over observed entries only (NaN labels masked out).
loss_output = output_npy[~np.isnan(label_npy)]
loss_label = label_npy[~np.isnan(label_npy)]
mat_rmse = np.sqrt(((loss_output - loss_label) ** 2).mean())
print("Total rmse=", mat_rmse)
# Write full prediction/label matrices to a feather file for this lake/trial.
saveFeatherFullData(output_npy, label_npy, unique_tst_dates, args.lake_name, trial)
# Record this trial's RMSE for cross-trial comparison.
err_per_trial[trial] = mat_rmse