def do_train_step(self, batch_of_images, batch_of_contours, *, lr, beta):
        """
        Performs a forward-backward pass and a gradient step on the given batch of images and contours.

        Notes
        -----
        The inputs and outputs are **not** of type `torch.Tensor`; the conversion
        to and from `torch.Tensor` happens inside this function.
        """
        self.model_core.train()
        batch_of_images, batch_of_contours = sequence_to_var(
            batch_of_images, batch_of_contours, cuda=self.model_core)

        u_out = self.u_net(batch_of_images)
        prior_out = self.prior_net(batch_of_images)
        post_out = self.post_net(batch_of_images, batch_of_contours)
        point = self.sample(post_out)
        final_out = self.final_net(u_out, point)

        loss_u_net = self.logits2loss(final_out, batch_of_contours)
        loss_kl = self.dist_loss(prior_out, post_out)
        loss = loss_u_net + beta * loss_kl

        set_lr(self.optimizer, lr)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return sequence_to_np(loss, loss_u_net, loss_kl)
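The posterior ``sample`` above is typically a reparameterized draw from a diagonal Gaussian, which keeps sampling differentiable so that ``loss.backward()`` reaches the posterior net. A minimal sketch, assuming ``post_out`` is a ``(mu, log_sigma)`` pair (the actual parametrization of this class is not shown here):

import torch

def sample_gaussian(mu: torch.Tensor, log_sigma: torch.Tensor) -> torch.Tensor:
    # reparameterization trick: z = mu + sigma * eps is differentiable w.r.t. mu and log_sigma
    eps = torch.randn_like(mu)
    return mu + torch.exp(log_sigma) * eps

mu, log_sigma = torch.zeros(4, 6), torch.zeros(4, 6)  # toy posterior parameters
point = sample_gaussian(mu, log_sigma)                # one latent point per batch item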
Example #2
def do_train_step(self, batch_of_images, batch_of_contours, *, lr):
        """
        Performs a forward-backward pass and a gradient step on the given batch of images and contours.

        Notes
        -----
        The inputs and outputs are **not** of type `torch.Tensor`; the conversion
        to and from `torch.Tensor` happens inside this function.
        """
        self.model_core.train()
        batch_of_images, batch_of_contours = sequence_to_var(
            batch_of_images, batch_of_contours, cuda=self.model_core)

        u_out = self.u_net(batch_of_images)
        u_curve = self.to_curve(u_out)

        z, reproduced_curve = self.autoencoder(u_curve)

        # samples from the standard normal prior for the MMD term (no autograd
        # needed), placed on the same device as the encodings
        true_samples = torch.randn(200, self.latent_space_dim, device=z.device)

        loss_u_curve = self.logits2loss(u_out, batch_of_contours)
        loss_mmd = self.dist_loss(true_samples, z)
        loss_reproduced_curve = torch.nn.functional.mse_loss(
            u_curve, reproduced_curve)
        loss = loss_u_curve + self.beta * loss_mmd + self.gama * loss_reproduced_curve

        set_lr(self.optimizer, lr)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return sequence_to_np(loss, loss_u_curve, loss_mmd,
                              loss_reproduced_curve)
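Here ``dist_loss`` compares the encodings ``z`` with samples from a standard normal prior, i.e. it plays the role of a maximum mean discrepancy (MMD) term as in Wasserstein/Info autoencoders. A minimal RBF-kernel MMD sketch (an assumption about what ``dist_loss`` computes, not necessarily this repository's implementation):

import torch

def rbf_kernel(x, y, sigma=1.0):
    # pairwise Gaussian kernel values between the rows of x and the rows of y
    return torch.exp(-torch.cdist(x, y) ** 2 / (2 * sigma ** 2))

def mmd_loss(prior_samples, z):
    # (biased) estimate of MMD^2 between the prior and the encoder distribution
    return (rbf_kernel(prior_samples, prior_samples).mean()
            + rbf_kernel(z, z).mean()
            - 2 * rbf_kernel(prior_samples, z).mean())

z = torch.randn(32, 8)       # toy encodings
prior = torch.randn(200, 8)  # standard normal prior samples
print(mmd_loss(prior, z))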
Example #3
from dpipe.torch import sequence_to_var, to_np, optimizer_step  # assumed import path for these helpers

def train_step_with_cc(*inputs,
                       architecture,
                       criterion,
                       optimizer,
                       with_cc=False,
                       **optimizer_params):
    architecture.train()
    inputs = sequence_to_var(*inputs, device=architecture)
    if with_cc:
        # the last two entries are the target and the extra `cc` argument,
        # which is forwarded to the criterion
        *inputs, target, cc = inputs
        loss = criterion(architecture(*inputs), target, cc)
    else:
        # the last entry is the target
        *inputs, target = inputs
        loss = criterion(architecture(*inputs), target)

    optimizer_step(optimizer, loss, **optimizer_params)
    return to_np(loss)
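A toy call of the ``with_cc=True`` path (the linear model, the weighted criterion, and the reading of ``cc`` as a per-sample weight are illustrative assumptions; the function itself only forwards ``cc`` to the criterion):

import numpy as np
import torch

def weighted_mse(pred, target, cc):  # hypothetical criterion taking the extra cc argument
    return (cc * (pred - target) ** 2).mean()

net = torch.nn.Linear(16, 1)
opt = torch.optim.SGD(net.parameters(), lr=1e-2)

x = np.random.randn(4, 16).astype(np.float32)
y = np.random.randn(4, 1).astype(np.float32)
cc = np.ones((4, 1), dtype=np.float32)  # illustrative per-sample weights

loss = train_step_with_cc(x, y, cc, architecture=net, criterion=weighted_mse,
                          optimizer=opt, with_cc=True)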
Example #4
import numpy as np
from dpipe.torch import sequence_to_var, to_np, optimizer_step  # assumed import path for these helpers

def padded_mri_train_step(*inputs, architecture, criterion, optimizer,
                          n_targets, **optimizer_params) -> np.ndarray:
    architecture.train()
    # slicing below keeps this correct even when n_targets == 0
    n_inputs = len(inputs) - n_targets

    inputs = sequence_to_var(*inputs, device=architecture)
    inputs, targets = inputs[:n_inputs], inputs[n_inputs:]

    loss = criterion(architecture(*inputs), *targets)

    optimizer_step(optimizer, loss, **optimizer_params)
    return to_np(loss)
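And a toy call with two targets (model, criterion, and shapes are illustrative; ``n_targets`` only controls where the inputs/targets split happens):

import numpy as np
import torch

def two_target_loss(pred, t1, t2):  # hypothetical criterion for n_targets=2
    mse = torch.nn.functional.mse_loss
    return mse(pred, t1) + mse(pred, t2)

net = torch.nn.Linear(16, 1)
opt = torch.optim.SGD(net.parameters(), lr=1e-2)

x = np.random.randn(4, 16).astype(np.float32)
t1 = np.random.randn(4, 1).astype(np.float32)
t2 = np.random.randn(4, 1).astype(np.float32)

loss = padded_mri_train_step(x, t1, t2, architecture=net, criterion=two_target_loss,
                             optimizer=opt, n_targets=2)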