Example #1
def discriminator_loss(logits_real, logits_fake):
    """
    Computes the discriminator loss.

    You should use the stable torch.nn.functional.binary_cross_entropy_with_logits
    loss rather than using a separate sigmoid function followed by the binary cross
    entropy loss.

    Inputs:
    - logits_real: PyTorch Tensor of shape (N,) giving scores for the real data.
    - logits_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.

    Returns:
    - loss: PyTorch Tensor containing (scalar) the loss for the discriminator.
    """

    ones_real = torch.ones_like(logits_real)
    zeros_fake = torch.zeros_like(logits_fake)

    D_loss_real = bce_loss(logits_real, ones_real)
    D_loss_fake = bce_loss(logits_fake, zeros_fake)

    loss = D_loss_real + D_loss_fake

    return loss
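Every example on this page calls a helper named bce_loss that the snippets never define. Given the docstrings, it is presumably a plain alias for torch.nn.functional.binary_cross_entropy_with_logits; a minimal sketch of that assumption, with a quick sanity check:

import torch
import torch.nn.functional as F

# Assumed alias (not shown in the source projects): the numerically stable
# sigmoid + binary cross entropy in a single call.
bce_loss = F.binary_cross_entropy_with_logits

logits_real = torch.randn(8)
logits_fake = torch.randn(8)
print(discriminator_loss(logits_real, logits_fake))  # scalar tensor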
Example #2
File: losses.py  Project: clebov/Ai
def discriminator_loss(logits_real, logits_fake):
    """
    Computes the discriminator loss.
    
    You should use the stable torch.nn.functional.binary_cross_entropy_with_logits 
    loss rather than using a separate sigmoid function followed by the binary cross
    entropy loss.
    
    Inputs:
    - logits_real: PyTorch Tensor of shape (N,) giving scores for the real data.
    - logits_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.
    
    Returns:
    - loss: PyTorch Tensor containing (scalar) the loss for the discriminator.
    """
    
    loss = None
    
    ####################################
    #          YOUR CODE HERE          #
    ####################################
    # Targets: ones for real images, zeros for fake images. torch.ones_like /
    # torch.zeros_like match the shape, dtype, and device of the logits,
    # avoiding the deprecated Variable wrapper and the undefined `dtype` global.
    true_labels = torch.ones_like(logits_real)
    fake_labels = torch.zeros_like(logits_fake)
    real_image_loss = bce_loss(logits_real, true_labels)
    fake_image_loss = bce_loss(logits_fake, fake_labels)
    
    loss = real_image_loss + fake_image_loss
    
    ##########       END      ##########
    
    return loss
Example #3
def discriminator_loss(logits_real, logits_fake):
    """
    Computes the discriminator loss.
    
    You should use the stable torch.nn.functional.binary_cross_entropy_with_logits 
    loss rather than using a separate sigmoid function followed by the binary cross
    entropy loss.
    
    Inputs:
    - logits_real: PyTorch Tensor of shape (N,) giving scores for the real data.
    - logits_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.
    
    Returns:
    - loss: PyTorch Tensor containing (scalar) the loss for the discriminator.
    """
    ####################################
    #          YOUR CODE HERE          #
    ####################################
    # Real images should score 1. torch.ones_like keeps the target on the
    # same device as the logits instead of hard-coding .cuda().
    real_loss = bce_loss(logits_real,
                         torch.ones_like(logits_real),
                         reduction='mean')
    # Fake images should score 0. With logits, flipping the label means a
    # zeros target (equivalently, negating the logit); computing
    # 1 - logits_fake is wrong in logit space.
    fake_loss = bce_loss(logits_fake,
                         torch.zeros_like(logits_fake),
                         reduction='mean')
    # Averaging the two terms halves the gradient scale relative to the
    # summed version in the other examples; either convention works.
    loss = (real_loss + fake_loss) / 2
    ##########       END      ##########

    return loss
Example #4
def discriminator_loss(logits_real, logits_fake):
    """
    Computes the discriminator loss.

    You should use the stable torch.nn.functional.binary_cross_entropy_with_logits
    loss rather than using a separate sigmoid function followed by the binary cross
    entropy loss.

    Inputs:
    - logits_real: PyTorch Tensor of shape (N,) giving scores for the real data.
    - logits_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.

    Returns:
    - loss: PyTorch Tensor containing (scalar) the loss for the discriminator.
    """

    loss = None

    ####################################
    #          YOUR CODE HERE          #
    ####################################

    labels_real = torch.ones_like(logits_real)
    labels_fake = torch.zeros_like(logits_fake)

    loss = bce_loss(logits_real, labels_real, reduction='mean') + bce_loss(
        logits_fake, labels_fake, reduction='mean')

    ##########       END      ##########

    return loss
Example #5
def discriminator_loss(logits_real, logits_fake):
    """
    Computes the discriminator loss.
    
    You should use the stable torch.nn.functional.binary_cross_entropy_with_logits 
    loss rather than using a separate sigmoid function followed by the binary cross
    entropy loss.
    
    Inputs:
    - logits_real: PyTorch Tensor of shape (N,) giving scores for the real data.
    - logits_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.
    
    Returns:
    - loss: PyTorch Tensor containing (scalar) the loss for the discriminator.
    """

    loss = None

    ####################################
    #          YOUR CODE HERE          #
    ####################################
    # Build (N, 1) targets on the same device as the incoming logits rather
    # than hard-coding .cuda(), which breaks CPU-only runs.
    logits_real_target = torch.ones(logits_real.size(0), 1,
                                    device=logits_real.device)
    logits_fake_target = torch.zeros(logits_fake.size(0), 1,
                                     device=logits_fake.device)

    real_loss = bce_loss(logits_real.reshape(-1, 1), logits_real_target)
    fake_loss = bce_loss(logits_fake.reshape(-1, 1), logits_fake_target)
    loss = real_loss + fake_loss

    ##########       END      ##########

    return loss
Example #6
def discriminator_loss(logits_real, logits_fake, device):
    """
    Computes the discriminator loss.
    
    You should use the stable torch.nn.functional.binary_cross_entropy_with_logits 
    loss rather than using a separate sigmoid function followed by the binary cross
    entropy loss.
    
    Inputs:
    - logits_real: PyTorch Tensor of shape (N,) giving scores for the real data.
    - logits_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.
    - device: torch.device on which to create the target tensors.
    
    Returns:
    - loss: PyTorch Tensor containing (scalar) the loss for the discriminator.
    """

    loss = None

    ####################################
    #          YOUR CODE HERE          #
    ####################################
    # Targets must match the (N,) shape of the logits; a (batch_size, 1)
    # target does not line up with (N,) scores.
    batch_size = logits_fake.size(0)

    zeros = torch.zeros(batch_size).to(device)
    loss1 = bce_loss(logits_fake, zeros)

    ones = torch.ones(batch_size).to(device)
    loss2 = bce_loss(logits_real, ones)

    # bce_loss already averages over the batch by default, so dividing the
    # sum by batch_size again would shrink the loss and its gradients.
    loss = loss1 + loss2

    ##########       END      ##########

    return loss
Example #7
def discriminator_loss(logits_real, logits_fake):
    """
    Computes the discriminator loss.
    
    You should use the stable torch.nn.functional.binary_cross_entropy_with_logits 
    loss rather than using a separate sigmoid function followed by the binary cross
    entropy loss.
    
    Inputs:
    - logits_real: PyTorch Tensor of shape (N,) giving scores for the real data.
    - logits_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.
    
    Returns:
    - loss: PyTorch Tensor containing (scalar) the loss for the discriminator.
    """

    # Target label vector: the discriminator should output 1 for every real image.
    true_labels = torch.ones_like(logits_real)

    # Discriminator loss has 2 parts: how well it classifies real images and how well it
    # classifies fake images.
    real_image_loss = bce_loss(logits_real, true_labels)
    fake_image_loss = bce_loss(logits_fake, torch.zeros_like(logits_fake))  # zeros target for fakes

    loss = real_image_loss + fake_image_loss

    return loss
Example #8
def discriminator_loss(logits_real, logits_fake):
    # Same loss as the examples above, minus the deprecated Variable wrapper,
    # the undefined `dtype` global, and the .cpu() calls, which would drag
    # the loss computation onto the host even when training on GPU.
    real_image_loss = bce_loss(logits_real, torch.ones_like(logits_real))
    fake_image_loss = bce_loss(logits_fake, torch.zeros_like(logits_fake))
    loss = real_image_loss + fake_image_loss
    return loss
Example #9
def generator_loss(logits_fake):
    """
    Computes the generator loss.
    
    You should use the stable torch.nn.functional.binary_cross_entropy_with_logits 
    loss rather than using a separate sigmoid function followed by the binary cross
    entropy loss.

    Inputs:
    - logits_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.
    
    Returns:
    - loss: PyTorch Tensor containing the (scalar) loss for the generator.
    """

    # The generator is trying to make the discriminator output 1 for all of
    # its images, so the target label vector is all ones.
    true_labels = torch.ones_like(logits_fake)

    # Compute the generator loss by comparing the fake logits against the
    # all-ones target.
    loss = bce_loss(logits_fake, true_labels)

    return loss
Example #10
    def forward(self, v, b, q, labels, bias, hint=None, has_hint=None):
        """Forward

        v: [batch, num_objs, obj_dim]
        b: [batch, num_objs, b_dim]
        q: [batch_size, seq_length]
        *_v_emb: [batch, g*v_dim], mask_weight: [batch, g]
        return: logits, not probs
        """
        w_emb = self.w_emb(q)
        q_emb = self.q_emb(w_emb)  # [batch, q_dim]
        v_emb, v_att = self.v_att(v, q_emb, hint)  # [batch, v_dim]
        if config.att_norm:
            v_emb = attention.apply_norm_attention(v, v_att, mode='avg')
        joint_repr, logits = self.classifier(q_emb, v_emb)
        debias_loss = torch.zeros(1)
        if labels is not None:
            if config.use_debias:
                debias_loss = self.debias_loss_fn(joint_repr, logits, bias,
                                                  labels, has_hint)
            elif config.use_rubi:
                q_pred = self.extra_c1(q_emb.detach())
                q_out = self.extra_c2(q_pred)
                rubi_logits = logits * torch.sigmoid(q_pred)
                if has_hint is not None:
                    debias_loss = bce_loss(
                        rubi_logits, labels, reduction='none') + bce_loss(
                            q_out, labels, reduction='none')
                    debias_loss = (debias_loss.sum(dim=1) *
                                   has_hint).sum() / has_hint.sum()
                else:
                    debias_loss = bce_loss(rubi_logits, labels) + bce_loss(
                        q_out, labels)
                    debias_loss *= labels.size(1)

        return logits, debias_loss, v_att
Example #11
def generator_loss(logits_fake):
    """
    Computes the generator loss.

    You should use the stable torch.nn.functional.binary_cross_entropy_with_logits
    loss rather than using a separate sigmoid function followed by the binary cross
    entropy loss.

    Inputs:
    - logits_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.

    Returns:
    - loss: PyTorch Tensor containing the (scalar) loss for the generator.
    """

    ones = torch.ones_like(logits_fake)
    loss = bce_loss(logits_fake, ones)

    return loss
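Putting the two losses together, here is a minimal sketch of one alternating GAN training step. The generator G, discriminator D, the two optimizers, and noise_dim are hypothetical names, not taken from the examples above:

import torch

def train_step(D, G, D_optimizer, G_optimizer, real_images, noise_dim=96):
    N = real_images.size(0)

    # Discriminator step: push real scores toward 1 and fake scores toward 0.
    D_optimizer.zero_grad()
    z = torch.randn(N, noise_dim, device=real_images.device)
    fake_images = G(z).detach()  # do not backprop into G on this step
    d_loss = discriminator_loss(D(real_images), D(fake_images))
    d_loss.backward()
    D_optimizer.step()

    # Generator step: try to make the discriminator output 1 on fakes.
    G_optimizer.zero_grad()
    z = torch.randn(N, noise_dim, device=real_images.device)
    g_loss = generator_loss(D(G(z)))
    g_loss.backward()
    G_optimizer.step()

    return d_loss.item(), g_loss.item()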
Example #12
def generator_loss(logits_fake):
    # As above, without Variable, the `dtype` global, or the .cpu() call.
    true_labels = torch.ones_like(logits_fake)
    loss = bce_loss(logits_fake, true_labels)
    return loss
Example #13
def compute_loss(outputs, y, mask):
    masked_loss = bce_loss(
        outputs, y.float(),
        reduction='none') * mask.float()  # (batch_size, max_len)
    return masked_loss.sum() / mask.sum()
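A usage sketch for compute_loss: outputs are per-timestep logits over a padded batch, and the mask zeroes out padding positions before the loss is normalized by the number of real tokens. All shapes and names below are made up for illustration:

import torch

batch_size, max_len = 4, 10
outputs = torch.randn(batch_size, max_len)        # raw logits
y = torch.randint(0, 2, (batch_size, max_len))    # 0/1 labels
lengths = torch.tensor([10, 7, 5, 3])             # real sequence lengths
# mask[i, t] is True for real timesteps and False for padding
mask = torch.arange(max_len)[None, :] < lengths[:, None]

print(compute_loss(outputs, y, mask))  # scalar averaged over real positions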