def discriminator_loss(real_imgs, fake_imgs):
    """Standard GAN discriminator loss: real outputs toward 1, fakes toward 0.

    NOTE(review): despite the names, ``real_imgs``/``fake_imgs`` are used here
    as discriminator *predictions*, not images — confirm with the caller.
    NOTE(review): this function is redefined later in the file; the later
    definition wins at import time.
    """
    loss_on_real = cross_entropy(tf.ones_like(real_imgs), real_imgs)
    loss_on_fake = cross_entropy(tf.zeros_like(fake_imgs), fake_imgs)
    return loss_on_real + loss_on_fake
def discriminator_loss(real_output, fake_output):
    """Discriminator loss on raw (pre-sigmoid) logits.

    Bug fix: the original called ``cross_entropy(from_logits=True)(...)``, but
    the module-level ``cross_entropy`` is a loss *instance*, so passing it a
    ``from_logits`` keyword raises ``TypeError``. Build a from-logits
    ``BinaryCrossentropy`` explicitly instead.

    Args:
        real_output: Discriminator logits for real images.
        fake_output: Discriminator logits for generated images.

    Returns:
        Scalar tensor: sum of the real and fake binary cross-entropy losses.
    """
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    real_loss = bce(tf.ones_like(real_output), real_output)
    fake_loss = bce(tf.zeros_like(fake_output), fake_output)
    return real_loss + fake_loss
def distribution_focal_loss(y_true, y_pred):
    r"""Distribution Focal Loss (DFL) from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.

    Each continuous target is split between its two neighbouring integer bins,
    weighted linearly by its distance to each, and each bin is trained with
    sparse categorical cross-entropy over the predicted distribution.

    Bug fix: the original called the module-level ``cross_entropy`` with a
    3-argument functional signature, which is incompatible with the loss
    instance bound at module level; the explicit sparse categorical
    cross-entropy is used instead. The docstring previously referenced
    ``torch.Tensor`` and parameters ``pred``/``label`` that did not match the
    actual signature.

    Args:
        y_true (tf.Tensor): Target distance labels with shape (N,), float
            values in the integral range ``[0, n]`` of the paper.
        y_pred (tf.Tensor): Predicted general distribution of bounding boxes
            (logits, before softmax) with shape (N, n+1).

    Returns:
        tf.Tensor: Per-sample loss tensor with shape (N,).
    """
    dis_left = tf.cast(y_true, tf.int32)   # floor bin index
    dis_right = dis_left + 1               # ceil bin index
    weight_left = tf.cast(dis_right, tf.float32) - y_true
    weight_right = y_true - tf.cast(dis_left, tf.float32)
    # Cross-entropy against each neighbouring bin, linearly weighted.
    loss = (
        tf.keras.losses.sparse_categorical_crossentropy(
            dis_left, y_pred, from_logits=True) * weight_left
        + tf.keras.losses.sparse_categorical_crossentropy(
            dis_right, y_pred, from_logits=True) * weight_right
    )
    return loss
def generator_loss(fake_imgs):
    """Generator loss: penalize D scoring generated samples as anything but real (1).

    NOTE(review): ``fake_imgs`` is used as discriminator *predictions* here,
    not images — confirm with the caller. This function is redefined later in
    the file; the later definition wins at import time.
    """
    all_real_targets = tf.ones_like(fake_imgs)
    return cross_entropy(all_real_targets, fake_imgs)
## Define Constants
GENERATE_SQUARE = 96  # output image side length in pixels
IMG_CHANNEL = 3       # RGB
NOISE = 100           # generator latent-vector size
BATCH_SIZE = 32
BUFFER_DATA = 20000   # tf.data shuffle buffer size
EPOCHS = 200
IMG_SHAPE = (GENERATE_SQUARE, GENERATE_SQUARE, IMG_CHANNEL)
PATH = "Anime"
train = []
PREVIEW_ROWS = 4
PREVIEW_COLS = 7
PREVIEW_MARGIN = 16

# Bug fix: the original `cross_entropy = cross_entropy()` calls an undefined
# name. The loss functions in this file call `cross_entropy(y_true, y_pred)`,
# matching a Keras BinaryCrossentropy instance, so bind that explicitly.
cross_entropy = tf.keras.losses.BinaryCrossentropy()
generator_optimizer = tf.keras.optimizers.Adam(1.5e-4, 0.5)      # lr, beta_1
discriminator_optimizer = tf.keras.optimizers.Adam(1.5e-4, 0.5)  # lr, beta_1

## Prepare Data
for filename in tqdm(os.listdir(PATH)):
    # convert("RGB") guards against grayscale/RGBA files that would break the
    # fixed 3-channel reshape below. Image.ANTIALIAS was removed in Pillow 10;
    # LANCZOS is the same resampling filter.
    img_ = Image.open(os.path.join(PATH, filename)).convert("RGB").resize(
        (GENERATE_SQUARE, GENERATE_SQUARE), Image.LANCZOS)
    train.append(np.asarray(img_))
train = np.reshape(train, (-1, GENERATE_SQUARE, GENERATE_SQUARE, IMG_CHANNEL))
train = train.astype(np.float32)
train = train / 127.5 - 1.  # scale uint8 pixels [0, 255] to [-1, 1]
train_ = tf.data.Dataset.from_tensor_slices(train).shuffle(BUFFER_DATA).batch(
    BATCH_SIZE)
def generator_loss(fake_output):
    """Generator loss on raw (pre-sigmoid) discriminator logits.

    Bug fix: the original called ``cross_entropy(from_logits=True)(...)``, but
    the module-level ``cross_entropy`` is a loss *instance*, so passing it a
    ``from_logits`` keyword raises ``TypeError``. Build a from-logits
    ``BinaryCrossentropy`` explicitly instead.

    Args:
        fake_output: Discriminator logits for generated images.

    Returns:
        Scalar tensor: binary cross-entropy against an all-ones ("real") target.
    """
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    return bce(tf.ones_like(fake_output), fake_output)