import numpy as np
import torch
from tqdm import tqdm

import utils


def train(model, dataloader, optimizer, loss_fn, metric, params):
    """Train the model for one epoch and return the average loss and metric score."""
    model.train()
    loss_avg = utils.RunningAverage()
    output = []
    y = []
    with tqdm(total=len(dataloader)) as t:
        for X_batch, y_batch in dataloader:
            # Move the batch to the configured device (CPU/GPU).
            X_batch = X_batch.to(params.device)
            y_batch = y_batch.to(params.device)

            # Forward pass and loss computation.
            output_batch = model(X_batch)
            loss = loss_fn(output_batch, y_batch)

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Track the running loss and collect predictions/targets for the metric.
            loss_avg.update(loss.item())
            y.append(y_batch.data.cpu().numpy())
            output.append(output_batch.data.cpu().numpy())

            t.set_postfix(loss='{:05.3f}'.format(loss_avg()))
            t.update()

    # Compute the metric over the whole epoch.
    output = np.concatenate(output, axis=0)
    y = np.concatenate(y, axis=0)
    metric_score = metric(output, y)
    avg_loss = loss_avg()
    return avg_loss, metric_score
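# Both train() and evaluate() rely on a utils.RunningAverage helper that is
# updated with one scalar per step and returns the mean when called. This is a
# minimal sketch of such a helper, assumed from how it is used above, not the
# project's actual utils module:


class RunningAverage:
    """Maintain the running mean of a stream of scalar values."""

    def __init__(self):
        self.total = 0.0
        self.steps = 0

    def update(self, value):
        # Accumulate one scalar observation (e.g. a batch loss).
        self.total += value
        self.steps += 1

    def __call__(self):
        # Mean of all values seen so far.
        return self.total / self.steps if self.steps > 0 else 0.0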
def eval_metrics(output, target):
    """Compute each metric for a single prediction/target pair."""
    metrics = [
        mse,
        abs_rel_diff,
        scale_invariant_error,
        median_error,
        mean_error,
        rms_linear,
    ]
    acc_metrics = np.zeros(len(metrics))
    # Add two leading singleton dimensions so the metrics receive batched input.
    output = output[None, :][None, :]
    target = target[None, :][None, :]
    for i, metric in enumerate(metrics):
        acc_metrics[i] += metric(output, target)
    return acc_metrics
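# The metric functions listed in eval_metrics (mse, abs_rel_diff, ...) are
# assumed to take batched prediction/target arrays and return a scalar. A
# minimal sketch of what one of them, mse, might look like; this is an
# assumption for illustration, not the repository's implementation:


def mse(output, target):
    """Mean squared error between predictions and targets."""
    return np.mean((output - target) ** 2)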
def test(config, model, test_data_loader):
    """Run the model over the test set and accumulate the configured metrics."""
    # Resolve metric functions by name from the config.
    metrics = [eval(metric) for metric in config['metrics']]
    metrics_results = np.zeros(len(metrics))
    for q, p, y in test_data_loader:
        output = model(q, p, test_data_loader.get_pretrained_embeddings())
        output = output.cpu().data.numpy()
        print(output)
        metrics_results += np.array([metric(output, y) for metric in metrics])
    return metrics_results
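# test() resolves metric names with eval(), so config['metrics'] is expected to
# hold strings naming functions importable in this module. A hypothetical config
# fragment and call, purely illustrative (the real config format is an
# assumption here):
#
#     config = {'metrics': ['mse', 'abs_rel_diff', 'rms_linear']}
#     results = test(config, model, test_data_loader)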
def evaluate(model, dataloader, loss_fn, metric, params, test_mode=False):
    """Evaluate the model on a dataset and return the average loss and metric score."""
    model.eval()
    loss_avg = utils.RunningAverage()
    output = []
    y = []
    # No gradients are needed during evaluation.
    with torch.no_grad():
        for X_batch, y_batch in dataloader:
            X_batch = X_batch.to(params.device)
            y_batch = y_batch.to(params.device)

            output_batch = model(X_batch)
            loss = loss_fn(output_batch, y_batch)

            loss_avg.update(loss.item())
            y.append(y_batch.data.cpu().numpy())
            output.append(output_batch.data.cpu().numpy())

    avg_loss = loss_avg()
    # Compute the metric over the full dataset.
    output = np.concatenate(output, axis=0)
    y = np.concatenate(y, axis=0)
    metric_score = metric(output, y, test_mode)
    return avg_loss, metric_score
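# A minimal sketch of how train() and evaluate() might be wired into an epoch
# loop. The data loaders, the params object (its hypothetical num_epochs field),
# and the metric are assumptions for illustration, not part of the repository:


def train_and_evaluate(model, train_loader, val_loader, optimizer, loss_fn, metric, params):
    for epoch in range(params.num_epochs):
        train_loss, train_score = train(model, train_loader, optimizer, loss_fn, metric, params)
        val_loss, val_score = evaluate(model, val_loader, loss_fn, metric, params)
        print(f'Epoch {epoch + 1}: train loss {train_loss:.3f}, '
              f'val loss {val_loss:.3f}, val metric {val_score}')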