def rotate(self, kappa):
     if len(kappa.shape) == 3:
         if self.rotate_by == "90":
             angle = np.random.randint(low=0, high=3, size=1)[0]
             return tf.image.rot90(kappa, k=angle)
         elif self.rotate_by == "uniform":
             angle = np.random.uniform(low=-np.pi, high=np.pi, size=1)[0]
             return rotate(kappa,
                           angle,
                           interpolation="nearest",
                           fill_mode="constant")
     elif len(kappa.shape) == 4:
         batch_size = kappa.shape[0]
         if self.rotate_by == "90":
             rotated_kap = []
             for j in range(batch_size):
                 angle = np.random.randint(low=0, high=3, size=1)[0]
                 rotated_kap.append(tf.image.rot90(kappa[j], k=angle))
             return tf.stack(rotated_kap, axis=0)
         elif self.rotate_by == "uniform":
             angles = np.random.uniform(low=-np.pi,
                                        high=np.pi,
                                        size=batch_size)
             return rotate(kappa,
                           angles,
                           interpolation="nearest",
                           fill_mode="constant")
Example #2
 def process(self, images, index=None):
     """Processes a batch of samples."""
     if index is not None:
         return self.children[index].process(images)
     config = self.random_config
     if config.rotate_by_90:
         images = self._aug_rotate_90(images)
     if config.rotation_probability > 0.0:
         images = tf.cond(self.rotate.value,
                          lambda: tfi.rotate(images, self.angle.value),
                          lambda: tf.identity(images))
     if config.roll_probability > 0.0:
         images = tf.cond(self.roll.value, lambda: self._aug_roll(images),
                          lambda: tf.identity(images))
     if config.resize_probability > 0.0:
         images = tf.cond(self.resize.value,
                          lambda: self._aug_resize(images),
                          lambda: tf.identity(images))
     if config.contrast_probability > 0.0:
         images = tf.cond(self.contrast.value,
                          lambda: self._aug_contrast(images),
                          lambda: tf.identity(images))
     if config.smooth_probability > 0.0:
         images = tf.cond(self.smooth.value,
                          lambda: self._aug_smooth(images),
                          lambda: tf.identity(images))
     if config.negate_probability > 0.0:
         images = tf.cond(self.negate.value,
                          lambda: self._aug_negate(images),
                          lambda: tf.identity(images))
     return images
Example #3
def rotate(image, degrees, replace):
    """Rotates the image by degrees either clockwise or counterclockwise.

    Args:
        image: An image Tensor of type uint8.
        degrees: Float, a scalar angle in degrees to rotate all images by. If
            degrees is positive the image will be rotated clockwise otherwise
            it will be rotated counterclockwise.
        replace: A one or three value 1D tensor to fill empty pixels caused by
            the rotate operation.

    Returns:
        The rotated version of image.
    """
    # Convert from degrees to radians.
    degrees_to_radians = math.pi / 180.0
    radians = degrees * degrees_to_radians

    # In practice, we should randomize the rotation degrees by flipping
    # it negatively half the time, but that's done on 'degrees' outside
    # of the function.
    if isinstance(replace, list) or isinstance(replace, tuple):
        replace = replace[0]
    image = tfa_image.rotate(image, radians, fill_value=replace)
    return image
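A minimal usage sketch for the rotate helper above (not part of the original example). It assumes tfa_image is tensorflow_addons.image, as in the snippet, and that the installed tensorflow_addons release supports the fill_value argument; the dummy image and the grey fill value are only illustrative.

import math
import tensorflow as tf
import tensorflow_addons.image as tfa_image

image = tf.zeros([64, 64, 3], dtype=tf.uint8)                   # dummy uint8 image
rotated = rotate(image, degrees=30.0, replace=[128, 128, 128])  # grey fill for the empty corners
print(rotated.shape)  # (64, 64, 3)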
Example #4
def compute_inception_score(model, d):
    mean, logvar = model.encode(test_images)
    r_m = np.identity(model.latent_dim)
    z = model.reparameterize(mean, logvar)
    r_x = rotate(test_images, d)
    c, s = np.cos(d), np.sin(d)
    r_m[0, [0, 1]], r_m[1, [0, 1]] = [c, s], [-s, c]
    rota_z = matvec(tf.cast(r_m, dtype=tf.float32), z)
    phi_x = model.sample(rota_z)
    return inception_model.compute_score(r_x, phi_x)
Example #5
    def _rotate(img, mask=None):
        if angle_radian == 0.0:
            # early return if no rotation is required
            if mask is not None:
                return img, mask
            else:
                return img

        if mask is not None:
            # multiply with mask, to ensure non-valid locations are zero
            img = tf.math.multiply(img, mask)
            # rotate img
            img_rotated = tfa_image.rotate(img,
                                           angle_radian,
                                           interpolation='BILINEAR')
            # rotate mask (will serve as normalization weights)
            mask_rotated = tfa_image.rotate(mask,
                                            angle_radian,
                                            interpolation='BILINEAR')
            # normalize sparse flow field and mask
            img_rotated = tf.math.multiply(
                img_rotated, tf.math.reciprocal_no_nan(mask_rotated))
            mask_rotated = tf.math.multiply(
                mask_rotated, tf.math.reciprocal_no_nan(mask_rotated))
        else:
            img_rotated = tfa_image.rotate(img,
                                           angle_radian,
                                           interpolation='BILINEAR')

        if is_flow:
            # If image is a flow image, scale flow values to be consistent with the
            # rotation.
            cos = tf.math.cos(angle_radian)
            sin = tf.math.sin(angle_radian)
            rotation_matrix = tf.reshape([cos, sin, -sin, cos], [2, 2])
            img_rotated = tf.linalg.matmul(img_rotated, rotation_matrix)

        if mask is not None:
            return img_rotated, mask_rotated
        return img_rotated
Example #6
 def train_step(model, x, optimizer):
     for degree in range(0, 100, 10):
         d = np.radians(degree)
         with tf.GradientTape() as tape:
             r_x = rotate(x, d)
             ori_loss = compute_loss(model, x)
             rota_loss = reconstruction_loss(model, r_x)
             ori_cross_l = ori_cross_loss(model, x, d)
             rota_cross_l = rota_cross_loss(model, x, d)
             total_loss = ori_loss + rota_loss + ori_cross_l + rota_cross_l
         gradients = tape.gradient(total_loss, model.trainable_variables)
         optimizer.apply_gradients(zip(gradients,
                                       model.trainable_variables))
Example #7
def ori_cross_loss(model, x, d):
    r_x = rotate(x, d)
    mean, logvar = model.encode(r_x)
    r_z = model.reparameterize(mean, logvar)
    c, s = np.cos(d), np.sin(d)
    latent = model.latent_dim
    r_m = np.identity(latent)
    r_m[0, [0, 1]], r_m[1, [0, 1]] = [c, -s], [s, c]
    phi_z = rotate_vector(r_z, r_m)
    phi_x = model.decode(phi_z)

    cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(logits=phi_x, labels=x)
    logx_z = -tf.reduce_sum(cross_ent, axis=[1, 2, 3])

    return -tf.reduce_mean(logx_z)
Example #8
def rotate(image):
    level = tf.convert_to_tensor(
        (config.AUGMENT_MAGNITUDE / config._MAX_LEVEL) * 30, tf.float32)
    should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool)
    degree = tf.cond(should_flip, lambda: level, lambda: -level)
    degree_to_radians = tf.convert_to_tensor(math.pi / 180., tf.float32)
    radians = tf.math.multiply(degree, degree_to_radians)
    new_imgsize = tf.cast(tf.math.abs(tf.divide(config.IMG_SIZE, radians)),
                          tf.int32)
    image = tf.image.resize(image, (new_imgsize, new_imgsize))
    image = image_ops.rotate(image, radians, fill_mode='constant')
    image = tf.image.resize_with_crop_or_pad(image, config.IMG_SIZE,
                                             config.IMG_SIZE)
    image = tf.cast(image, tf.uint8)
    return image
Example #9
 def train_step(x):
     d = np.radians(random.randint(30, 90))
     with tf.GradientTape(persistent=True) as tape:
         r_x = rotate(x, d)
         ori_loss, ori_disc_loss = compute_loss(x)
         rota_loss, rota_disc_loss = compute_loss(r_x)
         ori_cross_l = ori_cross_loss(model, x, d)
         rota_cross_l = rota_cross_loss(model, x, d)
         total_loss = ori_loss + rota_loss + ori_cross_l + rota_cross_l
         total_disc_loss = ori_disc_loss + rota_disc_loss
     vae_gradients = tape.gradient(total_loss, model.trainable_variables)
     disc_gradients = tape.gradient(total_disc_loss,
                                    discriminator.trainable_variables)
     vae_optimizer.apply_gradients(
         zip(vae_gradients, model.trainable_variables))
     disc_optimizer.apply_gradients(
         zip(disc_gradients, discriminator.trainable_variables))
Example #10
def rotate(image, degrees, fill_value, interpolation='BILINEAR'):
    """Rotates the image by degrees either clockwise or counterclockwise.

    Args:
        image: An image Tensor of type uint8.
        degrees: Float, a scalar angle in degrees to rotate all images by. If
            degrees is positive the image will be rotated clockwise otherwise
            it will be rotated counterclockwise.
        fill_value: A one or three value 1D tensor to fill empty pixels caused
            by the rotate operation.
        interpolation: Interpolation method
    Returns:
        The rotated version of image.
    """
    # Convert from degrees to radians.
    degrees_to_radians = math.pi / 180.0
    radians = degrees * degrees_to_radians

    # In practice, we should randomize the rotation degrees by flipping
    # it negatively half the time, but that's done on 'degrees' outside
    # of the function.
    image = tfi.rotate(wrap(image), radians, interpolation=interpolation)
    return unwrap(image, fill_value)
Example #11
def rotate(image, degrees, replace):
    """Rotates the image by degrees either clockwise or counterclockwise.
    Args:
        image: An image Tensor of type uint8.
        degrees: Float, a scalar angle in degrees to rotate all images by. If
            degrees is positive the image will be rotated clockwise otherwise
            it will be rotated counterclockwise.
        replace: A one or three value 1D tensor to fill empty pixels caused by
            the rotate operation.
    Returns:
        The rotated version of image.
    """
    with tf.name_scope("rotate"):
        # Convert from degrees to radians.
        degrees_to_radians = math.pi / 180.0
        radians = degrees * degrees_to_radians

        # In practice, we should randomize the rotation degrees by flipping
        # it negatively half the time, but that's done on 'degrees' outside
        # of the function.
        image = image_ops.rotate(wrap(image), radians)
        return unwrap(image, replace)
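Examples #10 and #11 rely on wrap and unwrap helpers that are not shown in the snippets. As a rough sketch of the usual pattern (an assumption, not the original implementation): wrap appends an all-ones validity channel before rotating, and unwrap overwrites every pixel whose validity dropped with the replace value; nearest-neighbour interpolation is assumed so the validity channel stays exactly 0 or 1.

import math
import tensorflow as tf
import tensorflow_addons.image as tfa_image

def wrap(image):
    # Append an all-ones channel so pixels introduced by the transform can be detected.
    return tf.concat([image, tf.ones_like(image[..., :1])], axis=-1)

def unwrap(image, replace):
    # Pixels whose extra channel is no longer 1 were filled in by the rotation;
    # overwrite them with the replace value, then drop the extra channel.
    flat = tf.reshape(image, [-1, image.shape[-1]])
    valid = tf.equal(flat[:, -1], 1)
    replace = tf.concat([tf.cast(replace, image.dtype),
                         tf.ones([1], image.dtype)], axis=0)
    flat = tf.where(valid[:, None], flat, tf.ones_like(flat) * replace)
    return tf.reshape(flat, tf.shape(image))[..., :-1]

img = tf.zeros([32, 32, 3], tf.uint8)
out = unwrap(tfa_image.rotate(wrap(img), math.radians(30.0)), replace=[128, 128, 128])
print(out.shape)  # (32, 32, 3)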
Example #12
def start_train(epochs, model, train_dataset, test_dataset, date, filePath):
    @tf.function
    def train_step(model, x, optimizer):
        for degree in range(0, 100, 10):
            d = np.radians(degree)
            with tf.GradientTape() as tape:
                r_x = rotate(x, d)
                ori_loss = compute_loss(model, x)
                rota_loss = reconstruction_loss(model, r_x)
                ori_cross_l = ori_cross_loss(model, x, d)
                rota_cross_l = rota_cross_loss(model, x, d)
                total_loss = ori_loss + rota_loss + ori_cross_l + rota_cross_l
            gradients = tape.gradient(total_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients,
                                          model.trainable_variables))
        '''
        with tf.GradientTape() as tape:
            r_x = rotate(x, d)
            rota_loss = compute_loss(model, r_x)
        gradients = tape.gradient(rota_loss, model.trainable_variables)  
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        with tf.GradientTape() as tape:
        gradients = tape.gradient(total_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        '''

    checkpoint_path = "./checkpoints/" + date + filePath
    ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)
    ckpt_manager = tf.train.CheckpointManager(ckpt,
                                              checkpoint_path,
                                              max_to_keep=5)
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        print('Latest checkpoint restored!!')
    degree = np.radians(random.randint(30, 90))
    for test_batch in test_dataset.take(1):
        test_sample = test_batch[0:num_examples_to_generate, :, :, :]
        r_sample = rotate(test_sample, degree)
    generate_and_save_images(model, 0, test_sample, file_path)
    generate_and_save_images(model, 0, r_sample, "rotate_image")
    display.clear_output(wait=False)
    in_range_scores = []
    mean, logvar = model.encode(test_images)
    r_m = np.identity(model.latent_dim)
    z = model.reparameterize(mean, logvar)
    for i in range(0, 100, 10):
        theta = np.radians(i)
        scores = compute_mnist_score(model, classifier, z, theta, r_m)
        in_range_scores.append(scores)
    score = np.mean(in_range_scores)
    while (score <= 6.7):
        start_time = time.time()
        for train_x in train_dataset:
            train_step(model, train_x, optimizer)
        end_time = time.time()
        loss = tf.keras.metrics.Mean()
        epochs += 1
        in_range_scores = []
        mean, logvar = model.encode(test_images)
        r_m = np.identity(model.latent_dim)
        z = model.reparameterize(mean, logvar)
        for i in range(0, 100, 10):
            theta = np.radians(i)
            scores = compute_mnist_score(model, classifier, z, theta, r_m)
            in_range_scores.append(scores)
        score = np.mean(in_range_scores)
        #generate_and_save_images(model, epochs, test_sample, file_path)
        #generate_and_save_images(model, epochs, r_sample, "rotate_image")
        if ((epochs + 1) % 10 == 0) or (score > 6.7):
            ckpt_save_path = ckpt_manager.save()
            print('Saving checkpoint for epoch {} at {}'.format(
                epochs + 1, ckpt_save_path))
            compute_and_save_mnist_score(model, classifier, epochs, file_path)
            for test_x in test_dataset:
                d = np.radians(random.randint(30, 90))
                r_x = rotate(test_x, d)
                total_loss = rota_cross_loss(model, test_x, d) \
                             + ori_cross_loss(model, test_x, d) \
                             + compute_loss(model, test_x) \
                             + reconstruction_loss(model, r_x)
                loss(total_loss)
            elbo = -loss.result()
            print(
                'Epoch: {}, Test set ELBO: {}, time elapsed for current epoch: {}'
                .format(epochs, elbo, end_time - start_time))
            print('The current score is {}'.format(score))
Example #13
def random_rotate(features, angle_range, keys=("image", )):
    """Randomly rotates all features with defined keys."""
    angle = tf.random.uniform([], *angle_range, dtype=tf.float32)
    for k in keys:
        features[k] = tfa_image.rotate(features[k], angle)
    return features
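A minimal usage sketch for random_rotate above (not part of the original example), assuming tfa_image is tensorflow_addons.image and that angle_range is given in radians, matching tfa_image.rotate.

import tensorflow as tf
import tensorflow_addons.image as tfa_image

features = {"image": tf.random.uniform([32, 32, 3])}
features = random_rotate(features, angle_range=(-0.5, 0.5))  # roughly +/- 28.6 degrees
print(features["image"].shape)  # (32, 32, 3)
# Typically applied inside a tf.data pipeline, e.g.:
# dataset = dataset.map(lambda f: random_rotate(f, (-0.5, 0.5)))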
Example #14
    def random_rotate(self, mask):
        """Randomly rotates mask on given range."""

        angle = self.rotate * tf.random.normal([], -1, 1)
        angle = math.pi * angle / 180
        return image_ops.rotate(mask, angle, interpolation=self.interpolation)
Example #15
from model import Classifier
from dataset import preprocess_images
from tensorflow_addons.image import rotate
import numpy as np
import tensorflow as tf

(train_set, train_labels), (test_dataset,
                            test_labels) = tf.keras.datasets.mnist.load_data()
test_images = preprocess_images(test_dataset)
classifier = Classifier(shape=(28, 28, 1))
c_t = test_images
c_l = test_labels
for d in range(0, 180, 10):
    degree = np.radians(d)
    r_t = rotate(test_images, degree)
    c_t = np.concatenate((c_t, r_t))
    c_l = np.concatenate((c_l, test_labels))
classifier.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])
classifier.fit(c_t, c_l, epochs=30, verbose=0)
filePath = "./classifier"
checkpoint_path = "./checkpoints/" + filePath
ckpt = tf.train.Checkpoint(classifier=classifier)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
ckpt_save_path = ckpt_manager.save()
print('Saving checkpoint for epoch {} at {}'.format(1, ckpt_save_path))
Example #16
def start_train(epochs, train_dataset, test_dataset, date, filePath):
    @tf.function
    def train_step(x):
        d = np.radians(random.randint(30, 90))
        with tf.GradientTape(persistent=True) as tape:
            r_x = rotate(x, d)
            ori_loss, ori_disc_loss = compute_loss(x)
            rota_loss, rota_disc_loss = compute_loss(r_x)
            ori_cross_l = ori_cross_loss(model, x, d)
            rota_cross_l = rota_cross_loss(model, x, d)
            total_loss = ori_loss + rota_loss + ori_cross_l + rota_cross_l
            total_disc_loss = ori_disc_loss + rota_disc_loss
        vae_gradients = tape.gradient(total_loss, model.trainable_variables)
        disc_gradients = tape.gradient(total_disc_loss,
                                       discriminator.trainable_variables)
        vae_optimizer.apply_gradients(
            zip(vae_gradients, model.trainable_variables))
        disc_optimizer.apply_gradients(
            zip(disc_gradients, discriminator.trainable_variables))
        '''
        with tf.GradientTape() as tape:
            r_x = rotate(x, d)
            rota_loss = compute_loss(model, r_x)
        gradients = tape.gradient(rota_loss, model.trainable_variables)  
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        with tf.GradientTape() as tape:
        gradients = tape.gradient(total_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        '''

    checkpoint_path = "./checkpoints/" + date + filePath
    ckpt = tf.train.Checkpoint(model=model,
                               optimizer=vae_optimizer,
                               discriminator=discriminator,
                               dis_optimizer=disc_optimizer)
    ckpt_manager = tf.train.CheckpointManager(ckpt,
                                              checkpoint_path,
                                              max_to_keep=5)
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        print('Latest checkpoint restored!!')
    for test_batch in test_dataset.take(1):
        test_sample = test_batch[0:num_examples_to_generate, :, :, :]
    generate_and_save_images(model, 0, test_sample)
    display.clear_output(wait=False)
    for epoch in range(1, epochs + 1):
        start_time = time.time()
        for train_x in train_dataset:
            train_step(train_x)
            end_time = time.time()
        loss = tf.keras.metrics.Mean()
        d = random.randint(30, 90)
        for test_x in test_dataset:
            r_x = rotate(test_x, d)
            total_loss = rota_cross_loss(model, test_x, d) \
                         + ori_cross_loss(model, test_x, d) \
                         + compute_loss(test_x) \
                         + compute_loss(r_x)
            loss(total_loss)
        elbo = -loss.result()
        print(
            'Epoch: {}, Test set ELBO: {}, time elapsed for current epoch: {}'.
            format(epoch, elbo, end_time - start_time))
        generate_and_save_images(model, epoch, test_sample)
        if (epoch + 1) % 10 == 0:
            ckpt_save_path = ckpt_manager.save()
            print('Saving checkpoint for epoch {} at {}'.format(
                epoch + 1, ckpt_save_path))