Example #1
def handle_scaling(validation_dataset, src, output: torch.Tensor, labels,
                   probabilistic, m, output_std):
    # TODO: move to a class function
    output_dist = None
    if probabilistic:
        unscaled_out = validation_dataset.inverse_scale(output)
        try:
            output_std = numpy_to_tvar(output_std)
        except Exception:
            pass
        output_dist = torch.distributions.Normal(unscaled_out, output_std)
    elif m > 1:
        output = validation_dataset.inverse_scale(output.cpu())
        labels = validation_dataset.inverse_scale(labels.cpu())
    elif len(output.shape) == 3:
        output = output.cpu().numpy().transpose(0, 2, 1)
        labels = labels.cpu().numpy().transpose(0, 2, 1)
        output = validation_dataset.inverse_scale(torch.from_numpy(output))
        labels = validation_dataset.inverse_scale(torch.from_numpy(labels))
        src_np = src.cpu().numpy().transpose(0, 2, 1)
        src = validation_dataset.inverse_scale(torch.from_numpy(src_np))
    else:
        output = validation_dataset.inverse_scale(output.cpu().transpose(1, 0))
        labels = validation_dataset.inverse_scale(labels.cpu().transpose(1, 0))
        src = validation_dataset.inverse_scale(src.cpu().transpose(1, 0))
    return src, output, labels, output_dist
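
A minimal usage sketch for the non-probabilistic 3-D branch. StubDataset and its affine inverse_scale are hypothetical stand-ins for a real validation dataset; handle_scaling is the function defined above.

import torch

class StubDataset:
    """Hypothetical dataset exposing the inverse_scale method handle_scaling expects."""
    def inverse_scale(self, x: torch.Tensor) -> torch.Tensor:
        # Undo an assumed affine scaling (std 2.0, mean 1.0).
        return x * 2.0 + 1.0

ds = StubDataset()
src = torch.randn(4, 20, 3)     # (batch, seq_len, n_features)
output = torch.randn(4, 20, 3)  # a 3-D output selects the transpose branch
labels = torch.randn(4, 20, 3)

src, output, labels, dist = handle_scaling(ds, src, output, labels,
                                           probabilistic=False, m=1,
                                           output_std=None)
assert dist is None  # only populated on the probabilistic path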
Example #2
def compute_loss(labels,
                 output,
                 src,
                 criterion,
                 validation_dataset,
                 probabilistic=None,
                 output_std=None,
                 m=1):
    # Warning: this assumes the src target is 1-D
    if not probabilistic and isinstance(output, torch.Tensor):
        if len(labels.shape) != len(output.shape):
            print(labels.shape)
            print(output.shape)
            if len(labels.shape) > 1:
                if labels.shape[1] == output.shape[1]:
                    labels = labels.unsqueeze(2)
                else:
                    labels = labels.unsqueeze(0)
    if probabilistic:
        if not isinstance(output_std, torch.Tensor):
            print("Converted")
            output_std = torch.from_numpy(output_std)
        if not isinstance(output, torch.Tensor):
            output = torch.from_numpy(output)
        output_dist = torch.distributions.Normal(output, output_std)
    if validation_dataset:
        if probabilistic:
            unscaled_out = validation_dataset.inverse_scale(output)
            try:
                output_std = numpy_to_tvar(output_std)
            except Exception:
                pass
            output_dist = torch.distributions.Normal(unscaled_out, output_std)
        else:
            output = validation_dataset.inverse_scale(output.cpu())
            labels = validation_dataset.inverse_scale(labels.cpu())
            src = validation_dataset.inverse_scale(src.cpu())
    if probabilistic:
        loss = -output_dist.log_prob(labels.float()).sum()  # FIX THIS
        loss = loss.numpy()
    elif isinstance(criterion, GaussianLoss):
        g_loss = GaussianLoss(output[0], output[1])
        loss = g_loss(labels)
    elif isinstance(criterion, MASELoss):
        assert len(labels.shape) == len(output.shape)
        loss = criterion(labels.float(), output, src, m)
    else:
        assert len(labels.shape) == len(output.shape)
        assert labels.shape[0] == output.shape[0]
        loss = criterion(output, labels.float())
    return loss
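
A minimal sketch of the plain (non-probabilistic) path, assuming the module-level imports used inside compute_loss (GaussianLoss, MASELoss) are in scope; shapes are illustrative only.

import torch

criterion = torch.nn.MSELoss()
output = torch.randn(8, 10)  # (batch, forecast_len)
labels = torch.randn(8, 10)
src = torch.randn(8, 30)     # only consulted by MASELoss

loss = compute_loss(labels, output, src, criterion, validation_dataset=None)
print(loss.item())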
Example #3
def handle_scaling(validation_dataset, src, output: torch.Tensor, labels, probabilistic, m, output_std):
    """Function that handles un-scaling the model output.

    :param validation_dataset: A dataset object for the validation dataset. We use its inverse scale method.
    :type validation_dataset: [type]
    :param src: The source (input) sequence tensor
    :type src: torch.Tensor
    :param output: The output of the model
    :type output: torch.Tensor
    :param labels: The real (target) values
    :type labels: torch.Tensor
    :param probabilistic: Whether the model is probabilistic or not.
    :type probabilistic: bool
    :param m: The number of targets; values greater than 1 trigger multi-target scaling
    :type m: int
    :param output_std: The standard deviation of the model output (probabilistic models only)
    :type output_std: torch.Tensor or numpy.ndarray
    :return: The un-scaled src, output, and labels, plus the output distribution (None unless probabilistic)
    :rtype: tuple
    """
    # TODO: move to a class function
    output_dist = None
    if probabilistic:
        unscaled_out = validation_dataset.inverse_scale(output)
        try:
            output_std = numpy_to_tvar(output_std)
        except Exception:
            pass
        output_dist = torch.distributions.Normal(unscaled_out, output_std)
    elif m > 1:
        output = validation_dataset.inverse_scale(output.cpu())
        labels = validation_dataset.inverse_scale(labels.cpu())
    elif len(output.shape) == 3:
        output = output.cpu().numpy().transpose(0, 2, 1)
        labels = labels.cpu().numpy().transpose(0, 2, 1)
        output = validation_dataset.inverse_scale(torch.from_numpy(output))
        labels = validation_dataset.inverse_scale(torch.from_numpy(labels))
        src_np = src.cpu().numpy().transpose(0, 2, 1)
        src = validation_dataset.inverse_scale(torch.from_numpy(src_np))
    else:
        output = validation_dataset.inverse_scale(output.cpu().transpose(1, 0))
        labels = validation_dataset.inverse_scale(labels.cpu().transpose(1, 0))
        src = validation_dataset.inverse_scale(src.cpu().transpose(1, 0))
    return src, output, labels, output_dist
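
For the probabilistic branch, a sketch along these lines, reusing the hypothetical StubDataset from the sketch after Example #1. output_std is passed as a ready tensor, so a failing numpy_to_tvar conversion is swallowed by the bare except and the tensor is used as-is.

ds = StubDataset()  # hypothetical, as defined after Example #1
src = torch.randn(4, 30)
output = torch.randn(4, 20)
labels = torch.randn(4, 20)
output_std = torch.rand(4, 20) + 0.1  # strictly positive stddevs

_, _, _, dist = handle_scaling(ds, src, output, labels,
                               probabilistic=True, m=1,
                               output_std=output_std)
print(dist.log_prob(labels).sum())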
Example #4
def compute_validation(validation_loader: DataLoader,
                       model,
                       epoch: int,
                       sequence_size: int,
                       criterion: torch.nn.modules.loss._Loss,
                       device: torch.device,
                       decoder_structure=False,
                       meta_data_model=None,
                       use_wandb: bool = False,
                       meta_model=None,
                       val_or_test="validation_loss",
                       probabilistic=False) -> float:
    """
    Function to compute the validation or the test loss
    """
    print('compute_validation')
    model.eval()
    loop_loss = 0.0
    with torch.no_grad():
        i = 0
        loss_unscaled_full = 0.0
        for src, targ in validation_loader:
            src = src.to(device)
            targ = targ.to(device)
            i += 1
            if decoder_structure:
                if type(model).__name__ == "SimpleTransformer":
                    targ_clone = targ.detach().clone()
                    output = greedy_decode(model,
                                           src,
                                           targ.shape[1],
                                           targ_clone,
                                           device=device)[:, :, 0]
                else:
                    if probabilistic:
                        output, output_std = simple_decode(model,
                                                           src,
                                                           targ.shape[1],
                                                           targ,
                                                           1,
                                                           probabilistic=probabilistic)
                        output, output_std = output[:, :, 0], output_std[0]
                        output_dist = torch.distributions.Normal(output, output_std)
                    else:
                        output = simple_decode(model=model,
                                               src=src,
                                               max_seq_len=targ.shape[1],
                                               real_target=targ,
                                               output_len=1,
                                               probabilistic=probabilistic)[:, :, 0]
            else:
                if probabilistic:
                    output_dist = model(src.float())
                    output = output_dist.mean.detach().numpy()
                    output_std = output_dist.stddev.detach().numpy()
                else:
                    output = model(src.float())
            labels = targ[:, :, 0]
            validation_dataset = validation_loader.dataset
            if validation_dataset.scale:
                unscaled_labels = validation_dataset.inverse_scale(labels)
                if probabilistic:
                    unscaled_out = validation_dataset.inverse_scale(output)
                    try:
                        output_std = numpy_to_tvar(output_std)
                    except Exception:
                        pass
                    unscaled_dist = torch.distributions.Normal(unscaled_out, output_std)
                    loss_unscaled = -unscaled_dist.log_prob(unscaled_labels.float()).sum()  # FIX THIS
                    loss_unscaled_full += len(labels.float()) * loss_unscaled.numpy().item()
                else:
                    # unscaled_src = validation_dataset.scale.inverse_transform(src.cpu())
                    unscaled_out = validation_dataset.inverse_scale(output.cpu())
                    unscaled_labels = validation_dataset.inverse_scale(labels.cpu())
                    loss_unscaled = criterion(unscaled_out, unscaled_labels.float())
                    loss_unscaled_full += len(labels.float()) * loss_unscaled.item()
                if i % 10 == 0 and use_wandb:
                    wandb.log({"trg": unscaled_labels, "model_pred": unscaled_out})
            if probabilistic:
                loss = -output_dist.log_prob(labels.float()).sum()  # FIX THIS
                loss = loss.numpy()
            elif isinstance(criterion, GaussianLoss):
                g_loss = GaussianLoss(output[0], output[1])
                loss = g_loss(labels)
            else:
                loss = criterion(output, labels.float())
            loop_loss += len(labels.float()) * loss.item()
    if use_wandb:
        if loss_unscaled_full:
            tot_unscaled_loss = loss_unscaled_full / (len(validation_loader.dataset) - 1)
            wandb.log({'epoch': epoch,
                       val_or_test: loop_loss / (len(validation_loader.dataset) - 1),
                       "unscaled_" + val_or_test: tot_unscaled_loss})
        else:
            wandb.log({'epoch': epoch, val_or_test: loop_loss /
                       (len(validation_loader.dataset) - 1)})
    model.train()
    return loop_loss / (len(validation_loader.dataset) - 1)
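
A sketch of driving the routine end to end with toy components. ToyDataset and TinyModel are hypothetical; scale is False so the un-scaling branch is skipped, and the module-level imports (wandb, GaussianLoss, greedy_decode, simple_decode) are assumed to be in scope even though none are exercised here.

import torch
from torch.utils.data import DataLoader, Dataset

class ToyDataset(Dataset):
    """Hypothetical dataset; compute_validation reads its .scale attribute."""
    scale = False  # falsy, so inverse scaling is skipped
    def __len__(self):
        return 32
    def __getitem__(self, idx):
        # (seq_len, n_features) source, (forecast_len, n_targets) target
        return torch.randn(20, 3), torch.randn(10, 1)

class TinyModel(torch.nn.Module):
    """Hypothetical model mapping (batch, 20, 3) to (batch, 10)."""
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(20 * 3, 10)

    def forward(self, x):
        return self.linear(x.flatten(1))

loader = DataLoader(ToyDataset(), batch_size=8)
val_loss = compute_validation(loader, TinyModel(), epoch=0, sequence_size=20,
                              criterion=torch.nn.MSELoss(),
                              device=torch.device("cpu"))
print(val_loss)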
Example #5
def compute_loss(labels, output, src, criterion, validation_dataset, probabilistic=None, output_std=None, m=1):
    """Function for computing the loss

    :param labels: The real forecasted values
    :type labels: torch.Tensor
    :param output: The output of the model
    :type output: torch.Tensor
    :param src: The source values (only really needed for MASELoss)
    :type src: torch.Tensor
    :param criterion: The loss function to apply
    :type criterion: [type]
    :param validation_dataset: Dataset whose inverse_scale method is used to un-scale values before computing the loss
    :type validation_dataset: [type]
    :param probabilistic: Whether the model output is a distribution, defaults to None
    :type probabilistic: bool, optional
    :param output_std: The standard deviation of the model output (probabilistic models only), defaults to None
    :type output_std: torch.Tensor or numpy.ndarray, optional
    :param m: The number of targets, defaults to 1
    :type m: int, optional
    :return: The computed loss
    :rtype: torch.Tensor
    """
    if not probabilistic and isinstance(output, torch.Tensor):
        if len(labels.shape) != len(output.shape):
            if len(labels.shape) > 1:
                if labels.shape[1] == output.shape[1]:
                    labels = labels.unsqueeze(2)
                else:
                    labels = labels.unsqueeze(0)
    if probabilistic:
        if not isinstance(output_std, torch.Tensor):
            print("Converted")
            output_std = torch.from_numpy(output_std)
        if not isinstance(output, torch.Tensor):
            output = torch.from_numpy(output)
        output_dist = torch.distributions.Normal(output, output_std)
    if validation_dataset:
        if probabilistic:
            unscaled_out = validation_dataset.inverse_scale(output)
            try:
                output_std = numpy_to_tvar(output_std)
            except Exception:
                pass
            output_dist = torch.distributions.Normal(unscaled_out, output_std)
        elif m > 1:
            output = validation_dataset.inverse_scale(output.cpu())
            labels = validation_dataset.inverse_scale(labels.cpu())
        elif len(output.shape) == 3:
            output = output.cpu().numpy().transpose(0, 2, 1)
            labels = labels.cpu().numpy().transpose(0, 2, 1)
            output = validation_dataset.inverse_scale(torch.from_numpy(output))
            labels = validation_dataset.inverse_scale(torch.from_numpy(labels))
            src_np = src.cpu().numpy().transpose(0, 2, 1)
            src = validation_dataset.inverse_scale(torch.from_numpy(src_np))
        else:
            output = validation_dataset.inverse_scale(output.cpu().transpose(1, 0))
            labels = validation_dataset.inverse_scale(labels.cpu().transpose(1, 0))
            src = validation_dataset.inverse_scale(src.cpu().transpose(1, 0))
    if probabilistic:
        loss = -output_dist.log_prob(labels.float()).sum()  # FIX THIS
    elif isinstance(criterion, GaussianLoss):
        g_loss = GaussianLoss(output[0], output[1])
        loss = g_loss(labels)
    elif isinstance(criterion, MASELoss):
        assert len(labels.shape) == len(output.shape)
        loss = criterion(labels.float(), output, src, m)
    else:
        assert len(labels.shape) == len(output.shape)
        assert labels.shape[0] == output.shape[0]
        loss = criterion(output, labels.float())
    return loss
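
A sketch of the probabilistic path with numpy inputs, which exercises the tensor-conversion guards; criterion and validation_dataset are not consulted on this branch.

import numpy as np
import torch

output = np.random.randn(8, 10).astype("float32")
output_std = (np.random.rand(8, 10) + 0.1).astype("float32")
labels = torch.randn(8, 10)

loss = compute_loss(labels, output, src=None, criterion=None,
                    validation_dataset=None, probabilistic=True,
                    output_std=output_std)
print(loss)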
Example #6
def test_ae_2(self):
    self.assertEqual(self.AE.decoder_output_layer.out_features, 10)
    res = numpy_to_tvar(numpy.random.rand(1, 2))
    self.assertIsInstance(res, torch.Tensor)
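
The numpy_to_tvar helper used above is not shown in these examples. A plausible minimal implementation, assuming it simply wraps a numpy array as a float tensor (consistent with the assertion in the test), would be:

import numpy
import torch

def numpy_to_tvar(x: numpy.ndarray) -> torch.Tensor:
    # Assumed behavior: convert a numpy array to a float32 torch tensor.
    return torch.from_numpy(x).float()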