def testNormalizeL1WithZero(self):
  """L1-normalizing an all-zero tensor yields zeros (graph mode)."""
  with self.cached_session() as sess:
    zeros = tf.constant(0.0, shape=[2, 3])
    result = utils.normalize(zeros, 'l1')
    # An all-zero input has L1 norm 0; the expectation is that normalize
    # keeps the output at zero rather than producing NaNs from 0/0.
    expected = tf.constant(0.0, shape=[2, 3])
    sess.run(result)
    self.assertAllEqual(result, expected)
def testNormalizeInfWithOnes(self):
  """Infinity-norm normalization of an all-ones tensor is the identity (graph mode)."""
  with self.cached_session() as sess:
    ones = tf.constant(1.0, shape=[2, 4])
    result = utils.normalize(ones, 'infinity')
    # Each row's max |value| is 1, so dividing by the infinity norm
    # leaves every entry unchanged.
    expected = tf.constant(1.0, shape=[2, 4])
    sess.run(result)
    self.assertAllEqual(result, expected)
def testNormalizeL1(self):
  """L1 normalization scales each row to unit L1 norm (eager/evaluate form)."""
  # Input rows are [1, 1, 1, 1]; the L1 norm over the non-batch
  # dimensions is 4 per row, so every entry becomes 1/4 = 0.25.
  uniform = tf.constant(1.0, shape=[2, 4])
  result = self.evaluate(utils.normalize(uniform, 'l1'))
  self.assertAllEqual(result, tf.constant(0.25, shape=[2, 4]))
def testNormalizeInf(self):
  """Infinity-norm normalization divides each row by its max |entry| (graph mode)."""
  with self.cached_session() as sess:
    inputs = tf.constant([[1.0, 2.0, -4.0], [-1.0, 5.0, -3.0]])
    result = utils.normalize(inputs, 'infinity')
    # Row-wise max magnitudes are 4 and 5; division preserves signs.
    expected = tf.constant([[0.25, 0.5, -1.0], [-0.2, 1.0, -0.6]])
    sess.run(result)
    self.assertAllEqual(result, expected)
def testNormalizeL2(self):
  """L2 normalization scales each row to unit L2 norm (eager/evaluate form)."""
  # Input rows are [1, 1, 1, 1]; each row's L2 norm over the non-batch
  # dimensions is sqrt(1+1+1+1) = 2, so every entry becomes 0.5.
  uniform = tf.constant(1.0, shape=[2, 4])
  result = self.evaluate(utils.normalize(uniform, 'l2'))
  self.assertAllEqual(result, tf.constant(0.5, shape=[2, 4]))
def testNormalizeL1(self):
  """L1 normalization scales each row to unit L1 norm (graph mode).

  NOTE(review): a method with this same name also appears earlier in the
  file (evaluate-based form); if both live in one test class, the later
  definition silently shadows the earlier one — confirm which is intended.
  """
  with self.cached_session() as sess:
    # target_tensor = [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]
    target_tensor = tf.constant(1.0, shape=[2, 4])
    normalized_tensor = utils.normalize(target_tensor, 'l1')
    # L1 norm of target_tensor (other than batch/1st dim) is [4, 4]; therefore
    # normalized_tensor = [[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]]
    # (fixed: this comment previously mislabeled the result as target_tensor).
    expected_tensor = tf.constant(0.25, shape=[2, 4])
    sess.run(normalized_tensor)
    self.assertAllEqual(normalized_tensor, expected_tensor)
def testNormalizeL1WithZero(self):
  """L1-normalizing an all-zero tensor yields zeros (eager/evaluate form).

  NOTE(review): a graph-mode test with this same name appears earlier in
  the file; if both are in one class the later definition shadows it.
  """
  zeros = tf.constant(0.0, shape=[2, 3])
  result = self.evaluate(utils.normalize(zeros, 'l1'))
  self.assertAllEqual(result, tf.constant(0.0, shape=[2, 3]))
def testNormalizeInfWithOnes(self):
  """Infinity-norm normalization of an all-ones tensor is the identity (eager form).

  NOTE(review): a graph-mode test with this same name appears earlier in
  the file; if both are in one class the later definition shadows it.
  """
  ones = tf.constant(1.0, shape=[2, 4])
  result = self.evaluate(utils.normalize(ones, 'infinity'))
  self.assertAllEqual(result, tf.constant(1.0, shape=[2, 4]))
def testNormalizeInf(self):
  """Infinity-norm normalization divides each row by its max |entry| (eager form).

  NOTE(review): a graph-mode test with this same name appears earlier in
  the file; if both are in one class the later definition shadows it.
  """
  inputs = tf.constant([[1.0, 2.0, -4.0], [-1.0, 5.0, -3.0]])
  result = self.evaluate(utils.normalize(inputs, 'infinity'))
  # Row-wise max magnitudes are 4 and 5; division preserves signs.
  expected = tf.constant([[0.25, 0.5, -1.0], [-0.2, 1.0, -0.6]])
  self.assertAllEqual(result, expected)
def normalize_with_mask(perturbation):
  """Mask out disallowed features of `perturbation`, then normalize it.

  Closes over `neighbor_config` from the enclosing scope: the feature mask
  and the norm type (`adv_grad_norm`) both come from that config.
  """
  masked = utils.apply_feature_mask(perturbation,
                                    neighbor_config.feature_mask)
  return utils.normalize(masked, neighbor_config.adv_grad_norm)