Example #1
 def call(self, y_true, y_pred):
     real_loss = categorical_crossentropy(y_true, tf.math.real(y_pred))
     if y_pred.dtype.is_complex:
         imag_loss = categorical_crossentropy(y_true, tf.math.imag(y_pred))
     else:
         imag_loss = real_loss
     return (real_loss + imag_loss) / 2.
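This `call` is typically a method on a `tf.keras.losses.Loss` subclass. A minimal self-contained sketch of such a wrapper, with an assumed class name and a dummy eager check (TensorFlow 2.x assumed; `call` is invoked directly to sidestep any dtype handling in `Loss.__call__`):

import tensorflow as tf
from tensorflow.keras.losses import categorical_crossentropy

class ComplexCategoricalCrossentropy(tf.keras.losses.Loss):  # name is hypothetical
    def call(self, y_true, y_pred):
        # average the cross-entropy of the real and imaginary parts
        real_loss = categorical_crossentropy(y_true, tf.math.real(y_pred))
        if y_pred.dtype.is_complex:
            imag_loss = categorical_crossentropy(y_true, tf.math.imag(y_pred))
        else:
            imag_loss = real_loss
        return (real_loss + imag_loss) / 2.

# quick eager check: real and imaginary parts are each valid distributions
y_true = tf.constant([[0., 1., 0.]])
y_pred = tf.complex([[0.1, 0.8, 0.1]], [[0.2, 0.6, 0.2]])
print(ComplexCategoricalCrossentropy().call(y_true, y_pred).numpy())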
Example #2
    def custom_loss(y_true, y_pred):

        y_true_label_1 = y_true[:, :num_classes]
        y_true_label_2 = y_true[:, num_classes:num_classes * 2]
        y_pred_label_1 = y_pred[:, :num_classes]
        y_pred_label_2 = y_pred[:, num_classes:num_classes * 2]

        y_pred_embedding_1 = y_pred[:,
                                    num_classes * 2:num_classes * 2 + emb_size]
        y_pred_embedding_2 = y_pred[:, num_classes * 2 + emb_size:]

        class_loss_1 = categorical_crossentropy(y_true_label_1, y_pred_label_1)
        class_loss_2 = categorical_crossentropy(y_true_label_2, y_pred_label_2)
        embedding_loss = cosine_similarity(y_pred_embedding_1,
                                           y_pred_embedding_2)

        are_labels_equal = K.all(K.equal(y_true_label_1, y_true_label_2),
                                 axis=1)

        a = tf.where(are_labels_equal,
                     tf.fill([tf.shape(are_labels_equal)[0]], 1.0),
                     tf.fill([tf.shape(are_labels_equal)[0]], -1.0))

        result = class_loss_1 + class_loss_2 + tf.math.multiply(
            a, embedding_loss)

        return tf.math.reduce_mean(result)
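For this loss to slice correctly, the model output and the targets must be packed along the last axis as [label_1 | label_2 | embedding_1 | embedding_2]. A sketch of that layout with dummy tensors (all shapes and values here are illustrative assumptions, not from the original):

import tensorflow as tf

num_classes, emb_size, batch = 10, 32, 4

# hypothetical packed prediction: two softmax heads followed by two embeddings
y_pred = tf.concat([
    tf.nn.softmax(tf.random.normal([batch, num_classes])),  # label head 1
    tf.nn.softmax(tf.random.normal([batch, num_classes])),  # label head 2
    tf.random.normal([batch, emb_size]),                    # embedding 1
    tf.random.normal([batch, emb_size]),                    # embedding 2
], axis=-1)

# matching targets: only the two one-hot label blocks are consumed by the loss
labels = tf.one_hot(
    tf.random.uniform([batch], maxval=num_classes, dtype=tf.int32), num_classes)
y_true = tf.concat([labels, labels], axis=-1)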
Example #3
def dice_cce(y_true, y_pred, axis=(1, 2, 3), ignore_background=False):
    "y_true and y_pred should be one_hot encoded"
    dice = 1 - generalized_dice(y_true, y_pred, axis=(1, 2, 3))
    if ignore_background:
        mask = 1 - y_true[:, :, :, :, 0]
        loss = categorical_crossentropy(y_true, y_pred)
        cce = tf.math.multiply(loss, mask)
    else:
        cce = categorical_crossentropy(y_true, y_pred)
    return dice + cce
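The generalized_dice helper is not shown. One common formulation is the generalized Dice score with inverse-square class weights (Sudre et al., 2017); the sketch below is only a guess at the helper's shape conventions (one-hot 5-D inputs, scalar output so it broadcasts against the CE map), not the original implementation:

import tensorflow as tf

def generalized_dice(y_true, y_pred, axis=(1, 2, 3), eps=1e-7):
    # per-class inverse-square-volume weights
    y_true = tf.cast(y_true, tf.float32)
    w = 1.0 / (tf.reduce_sum(y_true, axis=axis) ** 2 + eps)
    num = tf.reduce_sum(w * tf.reduce_sum(y_true * y_pred, axis=axis), axis=-1)
    den = tf.reduce_sum(w * tf.reduce_sum(y_true + y_pred, axis=axis), axis=-1)
    # mean over the batch -> scalar, so `dice + cce` broadcasts cleanly
    return 2.0 * tf.reduce_mean(num / (den + eps))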
Example #4
    def build_tabor_regularization(self, input_raw_tensor, model,
                                   y_target_tensor, y_true_tensor):
        reg_losses = []

        # R1 - Overly large triggers
        mask_l1_norm = K.sum(K.abs(self.mask_upsample_tensor))
        mask_l2_norm = K.sum(K.square(self.mask_upsample_tensor))
        mask_r1 = (mask_l1_norm + mask_l2_norm)

        pattern_tensor = (K.ones_like(self.mask_upsample_tensor) \
            - self.mask_upsample_tensor) * self.pattern_raw_tensor
        pattern_l1_norm = K.sum(K.abs(pattern_tensor))
        pattern_l2_norm = K.sum(K.square(pattern_tensor))
        pattern_r1 = (pattern_l1_norm + pattern_l2_norm)

        # R2 - Scattered triggers
        pixel_dif_mask_col = K.sum(K.square(
            self.mask_upsample_tensor[:-1, :, :] \
                                       - self.mask_upsample_tensor[1:, :, :]))
        pixel_dif_mask_row = K.sum(K.square(
            self.mask_upsample_tensor[:, :-1, :] \
                                       - self.mask_upsample_tensor[:, 1:, :]))
        mask_r2 = pixel_dif_mask_col + pixel_dif_mask_row

        pixel_dif_pat_col = K.sum(K.square(pattern_tensor[:-1, :, :] \
            - pattern_tensor[1:, :, :]))
        pixel_dif_pat_row = K.sum(K.square(pattern_tensor[:, :-1, :] \
            - pattern_tensor[:, 1:, :]))
        pattern_r2 = pixel_dif_pat_col + pixel_dif_pat_row

        # R3 - Blocking triggers
        cropped_input_tensor = (K.ones_like(self.mask_upsample_tensor) \
            - self.mask_upsample_tensor) * input_raw_tensor
        r3 = K.mean(
            categorical_crossentropy(
                model(cropped_input_tensor),
                K.reshape(y_true_tensor[0], shape=(1, -1))))

        # R4 - Overlaying triggers
        mask_crop_tensor = self.mask_upsample_tensor * self.pattern_raw_tensor
        r4 = K.mean(
            categorical_crossentropy(
                model(mask_crop_tensor),
                K.reshape(y_target_tensor[0], shape=(1, -1))))

        reg_losses.append(mask_r1)
        reg_losses.append(pattern_r1)
        reg_losses.append(mask_r2)
        reg_losses.append(pattern_r2)
        reg_losses.append(r3)
        reg_losses.append(r4)

        return K.stack(reg_losses)
Example #5
def masked_loss(y_true, y_pred):
    # treat entries whose true class is index 0 as padding and exclude them
    max_args = argmax(y_true)
    mask = cast(not_equal(max_args, zeros_like(max_args)), dtype='float32')
    loss = switch(mask,
                  categorical_crossentropy(y_true, y_pred, from_logits=True),
                  zeros_like(mask, dtype=floatx()))
    # average over the unmasked entries only
    return sum(loss) / (cast(sum(mask), dtype='float32') + epsilon())
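The same idea in plain TensorFlow ops, as a small eager check (the shapes and the padding-at-index-0 convention are assumptions):

import tensorflow as tf

y_true = tf.constant([[1., 0., 0.],    # class 0 -> treated as padding
                      [0., 1., 0.]])
logits = tf.random.normal([2, 3])

mask = tf.cast(tf.argmax(y_true, axis=-1) != 0, tf.float32)
per_step = tf.keras.losses.categorical_crossentropy(
    y_true, logits, from_logits=True)
loss = tf.reduce_sum(per_step * mask) / (tf.reduce_sum(mask) + 1e-7)
print(loss)  # cross-entropy of the second row only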
Example #6
def model_vgg16_cifar(n_clasif, xshape):
    input_shape = xshape[1:]

    model = Sequential()
    # 2 x Conv
    model.add(
        Conv2D(64, (3, 3),
               input_shape=input_shape,
               padding='same',
               activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 2 x Conv
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 3 x Conv
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 3 x Conv
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 3 x Conv
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(
        Dense(1000,
              activation='relu',
              activity_regularizer=regularizers.l2(0.001)))
    model.add(
        Dense(1000,
              activation='relu',
              activity_regularizer=regularizers.l2(0.001)))
    #model.add(Dense(4096 , activation='relu'))
    model.add(
        Dense(n_clasif,
              activation='linear',
              activity_regularizer=regularizers.l2(0.001)))

    model.summary()

    # Compile the model; the class form of the loss is needed to set from_logits
    model.compile(
        loss=losses.CategoricalCrossentropy(from_logits=True),
        optimizer=optimizers.Adam(
            learning_rate=0.001),  # alternative: optimizers.SGD(lr=0.03)
        metrics=[metrics.CategoricalAccuracy('acc')])
    return model
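Since the final Dense layer has a linear activation, the outputs are logits, so the loss object must be built with from_logits=True; the plain function losses.categorical_crossentropy takes y_true and y_pred directly and cannot be pre-configured with that flag, hence the class form above. A quick smoke test on random CIFAR-shaped data (purely illustrative; assumes the Keras imports used by model_vgg16_cifar are already in scope):

import numpy as np

model = model_vgg16_cifar(n_clasif=10, xshape=(None, 32, 32, 3))
x = np.random.rand(8, 32, 32, 3).astype("float32")
y = np.eye(10, dtype="float32")[np.random.randint(10, size=8)]
model.fit(x, y, epochs=1, batch_size=8, verbose=0)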
Example #7
    def actor_loss(self, states, actions, values, rewards, next_values, dones):
        policy = self.actor_model(
            tf.convert_to_tensor(np.vstack(states), dtype=tf.float32))
        advantages = []
        for i in range(len(states)):
            reward = np.array(rewards[i])
            value = np.array(values[i])
            next_value = np.array(next_values[i])

            if dones[i]:
                advantages.append(reward - value)
            else:
                advantages.append(reward + self.df * next_value - value)
        advantages = tf.reshape(advantages, [len(states)])
        # assign the result; the original discarded the converted tensor
        advantages = tf.convert_to_tensor(advantages, dtype=tf.float32)

        # SparseCategoricalCrossentropy
        entropy = losses.categorical_crossentropy(policy,
                                                  policy,
                                                  from_logits=True)
        ce_loss = losses.SparseCategoricalCrossentropy(from_logits=True)
        #policy_loss = ce_loss(actions, policy, sample_weight=np.array(advantages))  # same way
        log_pi = ce_loss(actions, policy)
        policy_loss = log_pi * np.array(advantages)
        policy_loss = tf.reduce_mean(policy_loss)
        log_pi = tf.reduce_mean(log_pi)
        return policy_loss - self.en * entropy, log_pi
Example #8
def predict(gids, model_path, mode="train"):
    model = load_model(model_path, custom_objects=TF_CUSTOM_METRICS)

    images = []
    labels = []
    for gid in gids:
        image = cv2.imread(
            os.path.join("E:", "data", "unet", mode, "images", f"{gid}.png"),
            cv2.IMREAD_COLOR)
        images.append(image / 255)

        label = cv2.imread(
            os.path.join("E:", "data", "unet", mode, "labels", f"{gid}.png"),
            cv2.IMREAD_GRAYSCALE)
        labels.append(one_hot_encoding(label))

    pred = model.predict(np.array(images))
    losses = categorical_crossentropy(labels, pred)
    losses = np.mean(losses, axis=(1, 2))

    argmax_mean_iou = ArgmaxMeanIoU(num_classes=6)
    for idx, p in enumerate(pred):
        argmax_mean_iou.update_state(labels[idx], p)
        iou = argmax_mean_iou.result().numpy()

        print(f"{gids[idx]}: loss={losses[idx]:02f}     iou={iou:02f}")

        cv2.imwrite(f"images/{mode}/{gids[idx]}-prediction.png",
                    one_hot_to_rgb(p))
        cv2.imwrite(f"images/{mode}/{gids[idx]}-label.png",
                    one_hot_to_rgb(labels[idx]))
        cv2.imwrite(f"images/{mode}/{gids[idx]}-image.png", images[idx] * 255)
 def _actor_loss(self, acts_and_advs, actor_logits):
     """The custom loss function for the actor
     For explanation of how tf/keras calls it, see critic loss above and reference.
     y_true = targets are actions and advantages. The y_pred = policy = output (logits) of 
     the actor network (not normalized)
     """
     actions, advantages = tf.split(acts_and_advs, 2, axis=-1)
     # sparse categorical CE loss obj that supports sample_weight arg on call()
     # from_logits argument ensures transformation into normalized probabilities
     weighted_sparse_ce = kls.SparseCategoricalCrossentropy(
         from_logits=True)
     # policy loss is defined by policy gradients, weighted by advantages
     # note: we only calculate the loss on the actions we've actually taken;
     # that, and the fact that A2C is an on-policy method, is why policy gradients
     # usually require many episodes to converge
     actions = tf.cast(actions, tf.int32)
     policy_loss = weighted_sparse_ce(actions,
                                      actor_logits,
                                      sample_weight=advantages)
     # entropy loss can be calculated via CE over itself
     entropy_loss = kls.categorical_crossentropy(actor_logits,
                                                 actor_logits,
                                                 from_logits=True)
     #signs are flipped because optimizer minimizes
     return policy_loss - self.ENTROPY_FACTOR * entropy_loss
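A note on the "entropy via cross-entropy over itself" trick: categorical_crossentropy(y_true, y_pred, from_logits=True) applies softmax only to y_pred, so passing raw logits as both arguments computes -sum(logits * log softmax(logits)) rather than the true entropy -sum(p * log p). Passing probabilities for both arguments (as some of the later examples do) matches the entropy exactly. A quick eager comparison:

import tensorflow as tf
from tensorflow.keras import losses as kls

logits = tf.constant([[2.0, 1.0, 0.1]])
probs = tf.nn.softmax(logits)

entropy = -tf.reduce_sum(probs * tf.math.log(probs), axis=-1)
print(entropy.numpy())                                         # true entropy
print(kls.categorical_crossentropy(probs, probs).numpy())      # matches
print(kls.categorical_crossentropy(logits, logits,
                                   from_logits=True).numpy())  # does not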
Example #10
def validate(model, x, y, bs):
    prediction = model.predict(x, batch_size=bs)
    validation_loss = K.eval(
        K.mean(categorical_crossentropy(K.constant(y),
                                        K.constant(prediction))))
    log_validation_performance(val_loss=validation_loss)
    return validation_loss
Example #11
    def _logits_loss(self, acts_and_advs, logits):
        # a trick to input actions and advantages through the same API
        actions, advantages = tf.split(acts_and_advs, 2, axis=-1)
        # sparse categorical CE loss object that supports the sample_weight arg on call()
        # the from_logits argument ensures transformation into normalized probabilities
        weighted_sparse_ce = kls.SparseCategoricalCrossentropy(
            from_logits=True)
        # policy loss is defined by policy gradients, weighted by advantages
        # note: we only calculate the loss on the actions we've actually taken
        actions = tf.cast(actions, tf.int32)
        policy_loss = weighted_sparse_ce(actions,
                                         logits,
                                         sample_weight=advantages)
        # entropy loss can be calculated via CE over itself
        entropy_loss = kls.categorical_crossentropy(logits,
                                                    logits,
                                                    from_logits=True)
        # here signs are flipped because the optimizer minimizes
        return policy_loss - self.params['entropy'] * entropy_loss
Example #12
 def _self_guided_earth_mover_distance(
         y_true: K.placeholder,
         y_pred: K.placeholder,
 ) -> K.placeholder:
     cross_entropy_loss = categorical_crossentropy(
         y_true=y_true,
         y_pred=y_pred
     )
     if model.emd_weight_head_start.emd_weight == 0:
         if model.emd_weight_head_start.epoch == 3:
             self_guided_emd_loss = _calculate_self_guided_loss(
                 y_true=y_true,
                 y_pred=y_pred,
                 ground_distance_sensitivity=ground_distance_sensitivity,
                 ground_distance_bias=ground_distance_bias,
                 ground_distance_manager=model.ground_distance_manager
             )
             model.emd_weight_head_start.cross_entropy_loss_history.append(cross_entropy_loss)
             model.emd_weight_head_start.self_guided_emd_loss_history.append(self_guided_emd_loss)
         return cross_entropy_loss
     else:
         self_guided_emd_loss = _calculate_self_guided_loss(
             y_true=y_true,
             y_pred=y_pred,
             ground_distance_sensitivity=ground_distance_sensitivity,
             ground_distance_bias=ground_distance_bias,
             ground_distance_manager=model.ground_distance_manager
         )
         return cross_entropy_loss + model.emd_weight_head_start.emd_weight * self_guided_emd_loss
Example #13
 def action_loss(self, actions, advantages, policy_prediction):
     actions = tf.cast(actions, tf.int32)
     policy_loss = kls.SparseCategoricalCrossentropy(from_logits=True)(
         actions, policy_prediction, sample_weight=advantages)
     policy_2 = tf.nn.softmax(policy_prediction)
     entropy_loss = kls.categorical_crossentropy(policy_2, policy_2)
     return policy_loss - self.params['entropy'] * entropy_loss
Example #14
    def pi_loss(y_true, m_out):
        y_pred, advs, vf = m_out[0], m_out[1], m_out[2]

        y_true_action = y_true[0]
        vf_true = tf.cast(y_true[1], tf.float32)

        # First, one-hot encoding of true value y_true
        y_true_action = tf.expand_dims(tf.cast(y_true_action, tf.int32),
                                       axis=1)
        y_true_action = tf.one_hot(y_true_action, depth=action_size)

        # Execute categorical crossentropy
        neglogp = cat_crosentropy(
            y_true_action,  # True actions chosen
            y_pred,  # Logits from model
            # sample_weight=advs
        )
        policy_loss = tf.reduce_mean(advs * neglogp)

        entropy_loss = kls.categorical_crossentropy(y_pred,
                                                    y_pred,
                                                    from_logits=True)

        loss_vf = kls.mean_squared_error(vf, vf_true)

        return policy_loss - coeff_entropy * entropy_loss + coeff_vf * loss_vf
Example #15
    def loss_function(self, state, y_model, d_model, prob, act, value):
        # Reward
        R = d_model - self.alpha * tf.reduce_mean(tf.square(state - y_model))

        # ========= A2C RL training =========
        # ======= Value Loss =======
        advantage = R - value
        value_loss = advantage**2

        # ======= Policy Loss =======
        # One hot encode actions for all L steps
        action_one_hot = tf.one_hot(act, self.L, dtype=tf.float32)
        # Negative entropy (sum of p*log p) of the filters' prob dist for each img,
        # summed over filters and steps
        entropy = tf.reduce_sum(prob * tf.math.log(prob + 1e-20), axis=[1, 2])
        entropy = tf.reshape(entropy, (-1, 1))

        # Cross-entropy for multi-class exclusive problem sum down all filters
        policy_loss = tf.reduce_sum(categorical_crossentropy(
            action_one_hot, prob),
                                    axis=1,
                                    keepdims=True)

        # Stop gradient flow from value network with advantage calculation
        policy_loss *= tf.stop_gradient(advantage)
        policy_loss -= self.beta * entropy

        total_loss = tf.reduce_mean((0.5 * value_loss + policy_loss))
        return total_loss
Example #16
def loss(y_true, y_pred):
    # the last of the 362 outputs is the value head; the rest are the policy
    mask = np.zeros((1, 362))
    mask[0][-1] = 1
    value_loss = mean_squared_error(mask*y_true, mask*y_pred)
    # invert the mask to select the policy outputs
    mask = 1-mask
    policy_loss = K.sum(categorical_crossentropy(mask*y_true, mask*y_pred))
    return policy_loss+value_loss
Example #17
    def _logits_loss(self, actions_and_advantages, logits):
        # A trick to input actions and advantages through the same API.
        actions, advantages = tf.split(actions_and_advantages, 2, axis=-1)
        # Sparse categorical CE loss obj that supports sample_weight arg on `call()`.
        # `from_logits` argument ensures transformation into normalized probabilities.
        weighted_sparse_ce = kls.SparseCategoricalCrossentropy(
            from_logits=True)
        # Policy loss is defined by policy gradients, weighted by advantages.
        # Note: we only calculate the loss on the actions we've actually taken.
        actions = tf.cast(actions, tf.int32)

        ### NOT SURE: ignore cases where the advantages are negative, which would cause a negative loss
        # adv2 = tf.where(advantages > 0, advantages, 0)

        policy_loss = weighted_sparse_ce(actions,
                                         logits,
                                         sample_weight=advantages)
        # policy_loss = weighted_sparse_ce(actions, logits, sample_weight=adv2)
        # Entropy loss can be calculated as cross-entropy over itself.
        probs = tf.nn.softmax(logits)
        entropy_loss = kls.categorical_crossentropy(probs, probs)
        # We want to minimize policy and maximize entropy losses.
        # Here signs are flipped because the optimizer minimizes.

        policy_loss2 = tf.where(policy_loss > 0, policy_loss,
                                tf.math.maximum(policy_loss, -10))

        # return policy_loss - self.entropy_c * entropy_loss
        return policy_loss2 - self.entropy_c * entropy_loss
Example #18
    def loop_loss(index, sum_loss):
        # pred box (box, (xmin, ymin, xmax, ymax))
        pred_box = y_pred[index, :, 0:4]
        # find the anchors marked as ground truth
        obj_mask = tf.reshape(y_true[index, :, 4:5], (-1, ))
        boxes = tf.boolean_mask(y_true[index, :, 0:4], obj_mask)
        # recover the ground-truth boxes with NMS
        box_ind = tf.image.non_max_suppression(
            boxes, tf.ones(tf.shape(boxes)[0], tf.float32), 40)
        boxes = tf.gather(boxes, box_ind)

        # compute the CIoU between every predicted bbox and the ground-truth bboxes
        CIoU = box_ciou(y_pred[index, :, 0:4], boxes)
        CIoU_loss = tf.reduce_sum(tf.boolean_mask((1 - CIoU), obj_mask))

        # class loss (computed on positive samples only)
        cls_loss = categorical_crossentropy(y_true[index, :, 5:],
                                            y_pred[index, :, 5:])
        cls_loss = tf.reduce_sum(tf.boolean_mask(cls_loss, obj_mask))

        # compute the IoU of the remaining predicted bboxes against the ground truth;
        # boxes above ignore_thresh are excluded from the conf loss
        ignore_mask = CIoU < 0.7
        # combine ignore_mask and obj_mask to select the entries that contribute to the conf loss
        mask = tf.math.logical_or(ignore_mask, tf.cast(obj_mask, tf.bool))
        # confidence loss
        conf_loss = binary_crossentropy(y_true[index, :, 4:5], y_pred[index, :,
                                                                      4:5])
        conf_loss = tf.reduce_sum(tf.boolean_mask(conf_loss, mask))
        return index + 1, sum_loss + CIoU_loss + cls_loss + conf_loss
Example #19
def custom_loss(y_true, y_pred):
    """
    The final loss function consists of the summation of two losses "GDL" and "CE"
    with a regularization term.
    """

    return generalized_dice_loss(
        y_true, y_pred) + 1.25 * categorical_crossentropy(y_true, y_pred)
Example #20
def class_loss_cls(y_true, y_pred):
    """
    计算具体的分类loss
    :param y_true: 真实值 [batch_size, num_rois, 4+1]
    :param y_pred: 预测值 [batch_size, num_rois, 4+1]
    :return: classifier class_loss
    """
    return backend.mean(losses.categorical_crossentropy(y_true, y_pred))
Example #21
def loss(target, pred):
    target_label = target[..., :1]
    pred_label = pred[..., :1]

    ceLoss = categorical_crossentropy(target_label, pred_label)
    smoothL1 = smooth_l1_loss(target, pred)

    return tf.reduce_mean(ceLoss) + smoothL1
Example #22
    def loss(y_true, y_pred):
        policy_loss = 0.

        for i in range(config.mu.unroll_steps):
            policy_loss += losses.categorical_crossentropy(
                y_true[:, i], y_pred[:, i]) / config.mu.unroll_steps

        return policy_loss
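Since this loss is a uniform average over the unroll steps, the Python loop can be replaced by a single vectorized call. A sketch, assuming y_true and y_pred have shape (batch, unroll_steps, num_actions):

import tensorflow as tf
from tensorflow.keras import losses

def unrolled_policy_loss(y_true, y_pred):
    # CE per (batch, step), then average over the step axis
    per_step = losses.categorical_crossentropy(y_true, y_pred)
    return tf.reduce_mean(per_step, axis=-1)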
Example #23
def optimize_actor_func(policy, action):
    # print(action_reward_value.shape)
    # action, rewards, value = action_reward_value[:,:3], action_reward_value[:,3], action_reward_value[:,4]
    # policy, value = policy_value[0], policy_value[1]
    # policy = policy
    # action = action_rewards

    return categorical_crossentropy(policy, action)
Example #24
 def entropy2(self, logits):
     '''
     Entropy-like term: Keras cross-entropy of the logits with themselves.
     Note: with from_logits=True only the second argument is softmaxed,
     so this is not exactly the entropy of softmax(logits).
     '''
     entropy = kls.categorical_crossentropy(logits,
                                            logits,
                                            from_logits=True)
     return entropy
Example #25
def custom_loss(y_true, y_pred, lambda_r=10):
    """
    custom loss function that is a combination of mean squared error and categorical cross entropy loss
    """

    reg_loss = lambda_r * mean_squared_error(y_true[:, :1], y_pred[:, :1])
    clf_loss = categorical_crossentropy(y_true[:, 1:], y_pred[:, 1:])
    return reg_loss + clf_loss
Example #26
def custom_loss(y_true, y_pred):
  # target is an 8-tuple:
  # (row, col, depth, width, class1, class2, class3, object_appeared)

  bce = binary_crossentropy(y_true[:, :4], y_pred[:, :4])  # location
  cce = categorical_crossentropy(y_true[:, 4:7], y_pred[:, 4:7])  # object class
  bce2 = binary_crossentropy(y_true[:, -1], y_pred[:, -1])  # object appeared

  # location and class terms only count when an object is actually present
  return bce * y_true[:, -1] + cce * y_true[:, -1] + 0.5 * bce2
Example #27
    def _logits_loss(self, target, logits):
        TDerrors, actions = tf.split(target, 2, axis=-1)
        logprob = self.logit2logprob(
            actions, logits, sample_weight=TDerrors
        )  # the log probability is really the negative of this
        probs = tf.nn.softmax(logits)
        entropy_loss = kls.categorical_crossentropy(probs, probs)

        return logprob - self.entropy_const * entropy_loss
Example #28
def weighted_cat_cross_entropy(y_true, y_pred, class_weights):
    # note: the class_weights argument is immediately overwritten with per-batch values
    class_weights = tf.reduce_sum(y_true, axis=-1, keepdims=True) / tf.reduce_sum(y_true)

    weights = tf.reduce_sum(class_weights * tf.cast(y_true, tf.float32), axis=-1)
    unweighted_losses = categorical_crossentropy(tf.cast(y_true, tf.float32), tf.cast(y_pred, tf.float32))
    weighted_losses = tf.cast(unweighted_losses, tf.float32) * tf.cast(weights, tf.float32)

    loss = tf.reduce_mean(weighted_losses)
    return loss
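Because the class_weights parameter above is shadowed, the caller's weights are never used. A variant that actually applies caller-supplied per-class weights might look like the following sketch (assumes one-hot y_true of shape (..., num_classes) and class_weights of shape (num_classes,)):

import tensorflow as tf
from tensorflow.keras.losses import categorical_crossentropy

def weighted_cce(y_true, y_pred, class_weights):
    y_true = tf.cast(y_true, tf.float32)
    # per-sample weight = weight of the sample's true class
    weights = tf.reduce_sum(class_weights * y_true, axis=-1)
    return tf.reduce_mean(categorical_crossentropy(y_true, y_pred) * weights)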
Example #29
 def _logits_loss(self, act_adv, logits):
     # actions are integer class indices, so the sparse CE variant is required
     ce = kls.SparseCategoricalCrossentropy(from_logits=True)
     actions, advantages = tf.split(act_adv, 2, axis=-1)
     actions = tf.cast(actions, tf.int32)
     policy_loss = ce(actions, logits, sample_weight=advantages)
     entropy_loss = kls.categorical_crossentropy(logits,
                                                 logits,
                                                 from_logits=True)
     return policy_loss - entropy_loss * self.params['entropy']
Example #30
    def call(self, y_true, y_pred):
        # print(y_true.shape, y_pred.shape)  # (None, 256) (None, 256)
        y_true = tf.split(y_true, 64, axis=-1)  # a list of 64 vectors
        y_pred = tf.split(y_pred, 64, axis=-1)
        myloss = 0
        for (true, pred) in zip(y_true, y_pred):
            myloss += losses.categorical_crossentropy(true, pred)

        return myloss
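The 64-way split-and-loop can be expressed as a single reshape. A sketch, assuming the flattened 256 columns are 64 distributions of 4 classes each:

import tensorflow as tf
from tensorflow.keras import losses

def grouped_cce(y_true, y_pred, groups=64):
    # reshape (batch, 256) -> (batch, 64, 4) and apply CE per group
    shape = (-1, groups, y_pred.shape[-1] // groups)
    per_group = losses.categorical_crossentropy(
        tf.reshape(y_true, shape), tf.reshape(y_pred, shape))
    return tf.reduce_sum(per_group, axis=-1)  # sum over the 64 groups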