Example #1
import numpy as np
import tensorflow as tf

# compose_ops and blend_np are assumed to come from the project under test;
# "dtype" is presumably supplied by the test framework's parametrization
# (e.g. tf.uint8, tf.float32).
def test_blend(dtype):
    image1 = tf.constant(
        [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=dtype
    )
    image2 = tf.constant(
        [
            [255, 255, 255, 255],
            [255, 255, 255, 255],
            [255, 255, 255, 255],
            [255, 255, 255, 255],
        ],
        dtype=dtype,
    )
    blended = compose_ops.blend(image1, image2, 0.5).numpy()
    np.testing.assert_equal(
        blended,
        [
            [128, 128, 128, 128],
            [128, 128, 128, 128],
            [128, 128, 128, 128],
            [128, 128, 128, 128],
        ],
    )

    image1 = np.random.randint(0, 255, (4, 4, 3), np.uint8)
    image2 = np.random.randint(0, 255, (4, 4, 3), np.uint8)
    blended = compose_ops.blend(
        tf.convert_to_tensor(image1), tf.convert_to_tensor(image2), 0.35
    ).numpy()
    expected = blend_np(image1, image2, 0.35)
    np.testing.assert_equal(blended, expected)
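
blend_np above is the NumPy reference the test compares against, but its definition is not shown on this page. A minimal sketch that is consistent with the expectations in the test (0 and 255 blended at factor 0.5 giving 128) might look like the following; the linear-interpolation formula, rounding, and clipping are assumptions, not the project's actual code.

import numpy as np

def blend_np(image1, image2, factor):
    """Hypothetical reference blend: interpolate from image1 toward image2."""
    a = image1.astype(np.float32)
    b = image2.astype(np.float32)
    out = a + factor * (b - a)
    # Round and clip so blending 0 and 255 at factor 0.5 yields 128, and the
    # result stays in the valid pixel range before casting back.
    out = np.clip(np.round(out), 0, 255)
    return out.astype(image1.dtype)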
Example #2
def sharpness_image(image: TensorLike, factor: Number) -> tf.Tensor:
    """Implements the Sharpness function from PIL using TF ops.

    Expects a batched image tensor of shape [batch, height, width, 3] with
    pixel values in the [0, 255] range.
    """
    orig_image = image
    image_dtype = image.dtype
    image = tf.cast(image, tf.float32)

    # SMOOTH PIL kernel: a 3x3 smoothing filter with a weighted center,
    # normalized by the sum of its weights (13).
    kernel = (
        tf.constant(
            [[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32, shape=[3, 3, 1, 1]
        )
        / 13.0
    )
    # Tile across the channel dimension so each of the 3 channels is smoothed
    # with the same kernel.
    kernel = tf.tile(kernel, [1, 1, 3, 1])
    strides = [1, 1, 1, 1]
    # VALID padding shrinks height and width by 2; the borders are filled back
    # in from the original image below.
    degenerate = tf.nn.depthwise_conv2d(
        image, kernel, strides, padding="VALID", dilations=[1, 1]
    )
    degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
    degenerate = tf.cast(degenerate, image_dtype)

    # For the borders of the resulting image, fill in the values of the
    # original image.
    mask = tf.ones_like(degenerate)
    padded_mask = tf.pad(mask, [[0, 0], [1, 1], [1, 1], [0, 0]])
    padded_degenerate = tf.pad(degenerate, [[0, 0], [1, 1], [1, 1], [0, 0]])
    result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
    # Blend the final result.
    blended = blend(result, orig_image, factor)
    return tf.cast(blended, image_dtype)
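
The blend helper called at the end of sharpness_image is also not shown here. A PIL-style linear blend in TF, sketched under the same assumptions as the blend_np sketch above (formula image1 + factor * (image2 - image1), rounding, clipping to [0, 255]); the real helper in the source project may differ.

import tensorflow as tf

def blend(image1, image2, factor):
    """Hypothetical PIL-style blend of two images by a scalar factor."""
    orig_dtype = image1.dtype
    image1 = tf.cast(image1, tf.float32)
    image2 = tf.cast(image2, tf.float32)
    # Linear interpolation: factor 0.0 returns image1, factor 1.0 returns image2.
    blended = image1 + factor * (image2 - image1)
    # Keep pixel values in range and round before casting back to the input dtype.
    blended = tf.clip_by_value(blended, 0.0, 255.0)
    return tf.cast(tf.round(blended), orig_dtype)

With these semantics, sharpness_image's call blend(result, orig_image, factor) returns the smoothed image at factor 0.0, the original at factor 1.0, and extrapolates past the original for factors above 1.0, which is what produces the sharpening effect.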
Example #3
def test_blend(dtype):
    """Checks blend against a constant factor and a tensor-valued random factor."""
    image1 = tf.constant(
        [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=dtype
    )
    image2 = tf.constant(
        [
            [255, 255, 255, 255],
            [255, 255, 255, 255],
            [255, 255, 255, 255],
            [255, 255, 255, 255],
        ],
        dtype=dtype,
    )
    blended = compose_ops.blend(image1, image2, 0.5).numpy()
    np.testing.assert_equal(
        blended,
        [
            [128, 128, 128, 128],
            [128, 128, 128, 128],
            [128, 128, 128, 128],
            [128, 128, 128, 128],
        ],
    )

    np.random.seed(0)
    image1 = np.random.randint(0, 255, (3, 5, 5), np.uint8)
    image2 = np.random.randint(0, 255, (3, 5, 5), np.uint8)
    tf.random.set_seed(0)
    factor = tf.random.uniform(shape=[],
                               maxval=1,
                               dtype=tf.dtypes.float32,
                               seed=0)
    blended = compose_ops.blend(tf.convert_to_tensor(image1),
                                tf.convert_to_tensor(image2), factor).numpy()
    expected = blend_np(image1, image2, factor.numpy())
    np.testing.assert_equal(blended, expected)
    assert blended.dtype == expected.dtype