Code example #1
File: loss.py Project: wolfhu/ijcnn19attacks
 def fprop(self, x, y, **kwargs):
     # Forward both the clean batch and its adversarial counterpart.
     x_adv = self.attack(x)
     d1 = self.model.fprop(x, **kwargs)
     d2 = self.model.fprop(x_adv, **kwargs)
     # Mean squared distance between each clean/adversarial feature pair.
     pairing_loss = [
         tf.reduce_mean(tf.square(a - b))
         for a, b in zip(d1[Model.O_FEATURES], d2[Model.O_FEATURES])
     ]
     pairing_loss = tf.reduce_mean(pairing_loss)
     # Cross-entropy on the clean and on the adversarial logits.
     loss = tf.reduce_mean(
         softmax_cross_entropy_with_logits(labels=y,
                                           logits=d1[Model.O_LOGITS]))
     loss += tf.reduce_mean(
         softmax_cross_entropy_with_logits(labels=y,
                                           logits=d2[Model.O_LOGITS]))
     return loss + self.weight * pairing_loss
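
This fprop combines cross-entropy on both the clean and the adversarial logits with a feature-pairing penalty: the mean squared distance between each pair of intermediate feature maps from the two forward passes, scaled by self.weight. A minimal usage sketch, assuming the CleverHans-style API these snippets come from (the attack wrapper, placeholders, and hyperparameters below are illustrative assumptions, not this project's code):

    # Sketch only -- names and parameters are assumptions.
    from cleverhans.attacks import FastGradientMethod
    from cleverhans.loss import FeaturePairing

    fgsm = FastGradientMethod(model, sess=sess)  # `model` is a cleverhans Model

    def attack(x):
        # Generate adversarial examples on the fly inside the loss.
        return fgsm.generate(x, eps=0.3, clip_min=0., clip_max=1.)

    loss = FeaturePairing(model, weight=0.1, attack=attack)
    loss_tensor = loss.fprop(x_ph, y_ph)  # CE(clean) + CE(adv) + weight * pairing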
Code example #2
File: loss.py Project: wolfhu/ijcnn19attacks
 def fprop(self, x, y, **kwargs):
     x_adv = self.attack(x)
     d1 = self.model.fprop(x, **kwargs)
     d2 = self.model.fprop(x_adv, **kwargs)
     pairing_loss = [
         tf.reduce_mean(tf.square(a - b))
         for a, b in zip(d1[Model.O_FEATURES], d2[Model.O_FEATURES])
     ]
     pairing_loss = tf.reduce_mean(pairing_loss)
     loss = softmax_cross_entropy_with_logits(labels=y,
                                              logits=d1[Model.O_LOGITS])
     loss += softmax_cross_entropy_with_logits(labels=y,
                                               logits=d2[Model.O_LOGITS])
     warnings.warn("LossFeaturePairing is deprecated, switch to "
                   "FeaturePairing. LossFeaturePairing may be removed "
                   "on or after 2019-03-06.")
     return loss + self.weight * pairing_loss
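
This is the deprecated predecessor of the FeaturePairing loss in example #1. The only behavioral difference is that the cross-entropy terms are left unreduced here, so the method returns a per-example loss vector (the scalar pairing term is broadcast onto it) rather than a scalar.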
Code example #3
File: loss.py Project: wolfhu/ijcnn19attacks
 def fprop(self, x, y, **kwargs):
     mix = tf.distributions.Beta(self.beta, self.beta)
     mix = mix.sample([tf.shape(x)[0]] + [1] * (len(x.shape) - 1))
     xm = x + mix * (x[::-1] - x)
     ym = y + mix * (y[::-1] - y)
     logits = self.model.get_logits(xm, **kwargs)
     loss = softmax_cross_entropy_with_logits(labels=ym, logits=logits)
     warnings.warn("LossMixUp is deprecated, switch to "
                   "MixUp. LossFeaturePairing may be removed "
                   "on or after 2019-03-06.")
     return loss
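
Deprecated predecessor of the MixUp loss in example #5. Compared with #5, it neither clamps the Beta sample with tf.maximum(mix, 1 - mix) nor reduces the per-example cross-entropy to a scalar.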
Code example #4
File: loss.py Project: wolfhu/ijcnn19attacks
def attack_softmax_cross_entropy(y, probs, mean=True):
    """
    Define target loss for an Attack.
    :param y: 2D tensor, one hot labels.
    :param probs: 2D tensor, probability distribution output from the model.
    :param mean: bool, return the mean of the loss when True.
    :return: mean of the loss if True, otherwise a vector with the per-sample
             loss
    """
    # If the model output is a Softmax op, step back to its input to get the
    # logits, for a numerically stable cross-entropy.
    logits = probs.op.inputs[0] if probs.op.type == 'Softmax' else probs
    out = softmax_cross_entropy_with_logits(logits=logits, labels=y)
    return tf.reduce_mean(out) if mean else out
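
The Softmax check recovers the pre-softmax logits from the graph: computing cross-entropy from probabilities (taking the log of a softmax output) is numerically unstable, so when the model hands back a Softmax tensor the function reaches one op upstream instead. A self-contained illustration of the unwrapping, assuming TF1-style graph mode:

    # Illustration only: a Softmax tensor remembers its input in the TF1 graph.
    import tensorflow as tf

    logits = tf.constant([[2.0, -1.0, 0.5]])
    probs = tf.nn.softmax(logits)
    assert probs.op.type == 'Softmax'
    recovered = probs.op.inputs[0]  # the original `logits` tensor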
Code example #5
File: loss.py Project: wolfhu/ijcnn19attacks
 def fprop(self, x, y, **kwargs):
     with tf.device('/CPU:0'):
         # Sample on the CPU: the Beta sampler has no GPU kernel, which
         # would otherwise raise an error.
         mix = tf.distributions.Beta(self.beta, self.beta)
         mix = mix.sample([tf.shape(x)[0]] + [1] * (len(x.shape) - 1))
     mix = tf.maximum(mix, 1 - mix)
     xm = x + mix * (x[::-1] - x)
     ym = y + mix * (y[::-1] - y)
     logits = self.model.get_logits(xm, **kwargs)
     loss = tf.reduce_mean(
         softmax_cross_entropy_with_logits(labels=ym, logits=logits))
     return loss
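
Here x[::-1] pairs each example with its mirror in the reversed batch, so xm = (1 - mix) * x + mix * x[::-1] with one Beta(beta, beta) draw per example; tf.maximum(mix, 1 - mix) keeps the mixing weight at least 0.5, so every mixed example leans clearly toward one endpoint of its pair, and the labels are interpolated with the same weights. A self-contained sketch of the same interpolation in NumPy (toy shapes, illustration only):

    # Mixup interpolation against the reversed batch, as in fprop above.
    import numpy as np

    rng = np.random.default_rng(0)
    beta = 1.0
    x = rng.normal(size=(4, 8))       # toy batch of 4 examples
    y = np.eye(3)[[0, 1, 2, 0]]       # one-hot labels, 3 classes
    lam = rng.beta(beta, beta, size=(4, 1))
    lam = np.maximum(lam, 1 - lam)    # same clamp as tf.maximum(mix, 1 - mix)
    xm = x + lam * (x[::-1] - x)      # = (1 - lam) * x + lam * x[::-1]
    ym = y + lam * (y[::-1] - y)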
Code example #6
File: loss.py Project: wolfhu/ijcnn19attacks
    def fprop(self, x, y, **kwargs):
        kwargs.update(self.kwargs)
        if self.attack is not None:
            # Train on both the clean batch and its attacked counterpart.
            x = x, self.attack(x)
        else:
            x = x,

        # Catching RuntimeError: Variable -= value not supported by tf.eager.
        try:
            y -= self.smoothing * (y - 1. / tf.cast(y.shape[-1], y.dtype))
        except RuntimeError:
            y.assign_sub(self.smoothing *
                         (y - 1. / tf.cast(y.shape[-1], y.dtype)))

        # `x` is a tuple of one or two batches; get logits for each.
        logits = [self.model.get_logits(x, **kwargs) for x in x]
        loss = sum(
            tf.reduce_mean(
                softmax_cross_entropy_with_logits(labels=y, logits=logit))
            for logit in logits)
        return loss
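
The smoothing step rewrites the one-hot labels as (1 - smoothing) * y + smoothing / K for K classes, pulling each label slightly toward the uniform distribution; the try/except exists because eager-mode Variables do not support in-place -=. A quick numeric check (illustration only):

    import numpy as np

    y = np.array([0., 0., 1.])        # one-hot label, K = 3 classes
    smoothing = 0.1
    y = y - smoothing * (y - 1. / 3)  # -> [0.0333..., 0.0333..., 0.9333...]
    # Identical to (1 - smoothing) * y_onehot + smoothing / K.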
Code example #7
File: loss.py Project: wolfhu/ijcnn19attacks
    def fprop(self, x, y, **kwargs):
        if self.attack is not None:
            x = x, self.attack(x)
        else:
            x = x,

        # Catching RuntimeError: Variable -= value not supported by tf.eager.
        try:
            y -= self.smoothing * (y - 1. / tf.cast(y.shape[-1], tf.float32))
        except RuntimeError:
            y.assign_sub(self.smoothing *
                         (y - 1. / tf.cast(y.shape[-1], tf.float32)))

        logits = [self.model.get_logits(x, **kwargs) for x in x]
        loss = sum(
            softmax_cross_entropy_with_logits(labels=y, logits=logit)
            for logit in logits)
        warnings.warn("LossCrossEntropy is deprecated, switch to "
                      "CrossEntropy. LossCrossEntropy may be removed on "
                      "or after 2019-03-06.")
        return loss
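
Deprecated predecessor of the CrossEntropy loss in example #6. It differs in two details: the label-smoothing cast is hard-coded to tf.float32 instead of using y.dtype, and the summed cross-entropy is returned unreduced, as a per-example vector.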
Code example #8
File: utils_tf.py Project: arifsuhan/ijcnn19attacks
def model_loss(y, model, mean=True):
  """
  Define loss of TF graph
  :param y: correct labels
  :param model: output of the model
  :param mean: boolean indicating whether to return the mean of the loss
               or a vector of per-input losses for the batch
  :return: mean of the loss if True, otherwise a vector with the per-sample
           loss
  """
  warnings.warn('This function is deprecated.')
  op = model.op
  if op.type == "Softmax":
    logits, = op.inputs
  else:
    logits = model

  out = softmax_cross_entropy_with_logits(logits=logits, labels=y)

  if mean:
    out = reduce_mean(out)
  return out
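
As in example #4, model_loss unwraps a Softmax output to reach the underlying logits before computing a numerically stable cross-entropy; reduce_mean and softmax_cross_entropy_with_logits appear to be the project's compat wrappers (cleverhans.compat upstream), which paper over TensorFlow version differences. A usage sketch, assuming TF1 placeholders and a CleverHans-style model (names below are illustrative):

    # Sketch only -- placeholder and model names are assumptions.
    preds = model.get_probs(x_ph)    # softmax output of the model
    loss = model_loss(y_ph, preds)   # logits recovered internally
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)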