Example #1
    def __call__(self, y_true, y_pred):
        # There are additional parameters for this function
        # Note: some of the 'modes' for edge behavior do not yet have a gradient definition in the Theano tree
        #   and cannot be used for learning

        kernel = [self.kernel_size, self.kernel_size]
        y_true = KC.reshape(y_true, [-1] + list(self.__int_shape(y_pred)[1:]))
        y_pred = KC.reshape(y_pred, [-1] + list(self.__int_shape(y_pred)[1:]))

        patches_pred = KC.extract_image_patches(y_pred, kernel, kernel, 'valid', self.dim_ordering)
        patches_true = KC.extract_image_patches(y_true, kernel, kernel, 'valid', self.dim_ordering)

        # Reshape so the values of each patch lie along the last axis
        bs, w, h, c1, c2, c3 = self.__int_shape(patches_pred)
        patches_pred = KC.reshape(patches_pred, [-1, w, h, c1 * c2 * c3])
        patches_true = KC.reshape(patches_true, [-1, w, h, c1 * c2 * c3])
        # Get mean
        u_true = KC.mean(patches_true, axis=-1)
        u_pred = KC.mean(patches_pred, axis=-1)
        # Get variance
        var_true = K.var(patches_true, axis=-1)
        var_pred = K.var(patches_pred, axis=-1)
        # Get covariance
        covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred

        ssim = (2 * u_true * u_pred + self.c1) * (2 * covar_true_pred + self.c2)
        denom = (K.square(u_true) + K.square(u_pred) + self.c1) * (var_pred + var_true + self.c2)
        ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
        return K.mean((1.0 - ssim) / 2.0)
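This `__call__` is the body of a loss-object class that carries `kernel_size`, `c1`, `c2`, and `dim_ordering` as attributes; keras-contrib's `DSSIMObjective` matches this shape. A minimal usage sketch, assuming that class (its constructor derives `c1`/`c2` from `k1`/`k2` internally, and `model` is any Keras model ready to compile):

from keras_contrib.losses import DSSIMObjective

model.compile(optimizer='adam', loss=DSSIMObjective(kernel_size=3))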
Example #2
def total_variation_loss(x):
    assert K.ndim(x) == 4
    if K.image_data_format() == 'channels_first':
        a = K.square(x[:, :, :img_h - 1, :img_w - 1] - x[:, :, 1:, :img_w - 1])
        b = K.square(x[:, :, :img_h - 1, :img_w - 1] - x[:, :, :img_h - 1, 1:])
    else:
        # Shift the image one pixel along each spatial axis and take squared differences
        a = K.square(x[:, :img_h - 1, :img_w - 1, :] - x[:, 1:, :img_w - 1, :])
        b = K.square(x[:, :img_h - 1, :img_w - 1, :] - x[:, :img_h - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
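`total_variation_loss` reads `img_h` and `img_w` from the enclosing module and expects a 4-D image batch. A minimal wiring sketch (the globals' values, the weight, and the tensor name `combination_image` are all assumptions):

img_h, img_w = 256, 256  # assumed module-level globals used by the slicing above
tv_weight = 1e-4         # assumed regularization weight
loss = tv_weight * total_variation_loss(combination_image)  # (1, img_h, img_w, 3) tensor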
Example #3
            def _get_coords_for_joint(joint_idx, parent_idx, child_angle_idx,
                                      coords):
                if parent_idx is None:  # joint_idx should be 0
                    coords[joint_idx] = K.zeros(base_shape[:-2] + [3, 1])
                    parent_bone = K.constant(
                        np.concatenate([
                            np.ones(base_shape),
                            np.zeros(base_shape),
                            np.zeros(base_shape)
                        ],
                                       axis=-2))
                else:
                    parent_bone = coords[parent_idx] - coords[joint_idx]
                    parent_bone_norm = K.sqrt(
                        K.sum(K.square(parent_bone), axis=-2, keepdims=True) +
                        K.epsilon())
                    parent_bone = parent_bone / parent_bone_norm

                for child_idx in body_graph[joint_idx]:
                    child_bone = tf.matmul(rotmat_list[child_angle_idx],
                                           parent_bone)
                    child_bone_idx = bone_idcs[(joint_idx, child_idx)]
                    child_bone = child_bone * K.reshape(
                        bone_len_list[child_bone_idx],
                        (child_bone.shape[0], 1, 1, 1))
                    coords[child_idx] = child_bone + coords[joint_idx]
                    child_angle_idx += 1

                for child_idx in body_graph[joint_idx]:
                    child_angle_idx, coords = _get_coords_for_joint(
                        child_idx, joint_idx, child_angle_idx, coords)

                return child_angle_idx, coords
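Traversal of the kinematic tree starts at the root: per the in-code comment, the root call passes `joint_idx=0` with `parent_idx=None`. A minimal driver sketch (`body_graph`, `rotmat_list`, `bone_len_list`, and `bone_idcs` must already exist in the enclosing scope; a dict keyed by joint index suffices for `coords` here):

coords = {}  # joint index -> (..., 3, 1) position tensor
final_angle_idx, coords = _get_coords_for_joint(0, None, 0, coords)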
Example #4
 def _get_bone_len(arg):
     bone_list = tf.unstack(arg[:, :, 0, :], axis=1)
     bones = [
         bone_list[j] - bone_list[i]
         for i, j in zip(members_from, members_to)
     ]
     bones = K.stack(bones, axis=1)
     return K.sqrt(K.sum(K.square(bones), axis=-1) + K.epsilon())
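`_get_bone_len` is shaped like a `Lambda` body: it takes a coordinate tensor of shape `(batch, joints, frames, 3)`, keeps frame 0, and returns one length per bone listed in `members_from`/`members_to`. A minimal wiring sketch (`coords` is a hypothetical tensor name):

from keras.layers import Lambda

bone_len = Lambda(_get_bone_len)(coords)  # -> (batch, n_bones)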
Example #5
def normSAD2(y_true, y_pred):
    y_true2 = K.l2_normalize(y_true + K.epsilon(), axis=-1)
    y_pred2 = K.l2_normalize(y_pred + K.epsilon(), axis=-1)
    mse = K.mean(K.square(y_true - y_pred), axis=-1)
    # sad = -K.log(1.0-K.mean(y_true2 * y_pred2/np.pi, axis=-1))
    sad = K.mean(y_true2 * y_pred2, axis=-1)
    # sid = SID(y_true,y_pred)

    return 0.005 * mse - 0.75 * sad
Example #6
def style_loss(style, gen):
    assert K.ndim(style) == 3
    assert K.ndim(gen) == 3
    S = gram_matrix(style)
    G = gram_matrix(gen)
    channels = 3
    size = img_h * img_w
    # Squared Euclidean (Frobenius) distance between the Gram matrices, scaled by the normalization constant
    return K.sum(K.square(S - G)) / (4. * (channels**2) * (size**2))
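`style_loss` depends on a `gram_matrix` helper that the excerpt omits. The definition below is the common one from Keras' neural style transfer example, offered as a sketch consistent with the channels-last 3-D asserts above:

def gram_matrix(x):
    assert K.ndim(x) == 3
    # Flatten each channel into a row, then take inner products between channels
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(features, K.transpose(features))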
Example #7
def normSAD2(y_true, y_pred):
    # y_true2 = K.l2_normalize(y_true + K.epsilon(), axis=-1)
    # y_pred2 = K.l2_normalize(y_pred + K.epsilon(), axis=-1)
    mse = K.mean(K.square(y_true - y_pred))
    sad = SAD(y_true, y_pred)
    # sad = -K.log(1.0-SAD(y_true, y_pred)/np.pi)
    # sid = SID(y_true,y_pred)

    # return 0.005 * mse + 0.75 * sad
    return 0.005 * mse + 10.0 * sad
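Both this `normSAD2` and `normSAD` in the next example call a `SAD` helper that the excerpt does not show. A plausible spectral-angle-distance sketch, labelled as an assumption rather than the project's actual implementation:

import tensorflow as tf

def SAD(y_true, y_pred):
    # Hypothetical reconstruction: mean angle between the two spectra
    y_true2 = K.l2_normalize(y_true + K.epsilon(), axis=-1)
    y_pred2 = K.l2_normalize(y_pred + K.epsilon(), axis=-1)
    cos_sim = K.sum(y_true2 * y_pred2, axis=-1)
    return K.mean(tf.acos(K.clip(cos_sim, -1.0 + K.epsilon(), 1.0 - K.epsilon())))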
Example #8
def normSAD(y_true, y_pred):
    # y_true2 = K.l2_normalize(y_true + K.epsilon(), axis=-1)
    # y_pred2 = K.l2_normalize(y_pred + K.epsilon(), axis=-1)
    mse = K.mean(K.square(y_true - y_pred))
    # sad = -K.log(1.0-K.mean(y_true2 * y_pred2/np.pi, axis=-1))
    sad = SAD(y_true, y_pred)
    # sid = SID(y_true,y_pred)

    # return 0.008*mse-1.0*sad
    return 0.008 * mse + 1.0 * sad
Example #9
 def _get_avg_bone_len(arg):
     bone_list = tf.unstack(arg[:, :, 0, :], axis=1)
     bones = [
         bone_list[j] - bone_list[i]
         for i, j in zip(members_from, members_to)
     ]
     bones = K.expand_dims(K.stack(bones, axis=1), axis=2)
     bone_len = K.sqrt(
         K.sum(K.square(bones), axis=-1, keepdims=True) + K.epsilon())
     return K.mean(bone_len, axis=1, keepdims=True)
Example #10
    def critic_optimizer(self):
        discounted_prediction = K.placeholder(shape=(None, ))

        value = self.critic.output

        # loss = MSE(discounted_prediction, value)
        loss = K.mean(K.square(discounted_prediction - value))

        optimizer = Adam(lr=self.critic_lr)
        updates = optimizer.get_updates(loss, self.critic.trainable_weights)
        train = K.function([self.critic.input, discounted_prediction], [loss],
                           updates=updates)
        return train
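The returned `train` is a compiled backend function: each call feeds states and discounted returns, applies one Adam step to the critic, and reports the loss. A minimal usage sketch (`agent`, `states`, and `discounted_rewards` are hypothetical names):

train = agent.critic_optimizer()
loss_value = train([states, discounted_rewards])[0]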
Example #11
def MSE_KL(y_true, y_pred):
    # y_true=y_true[:,-162:]
    y_true = K.switch(
        K.min(y_true) < 0, y_true - K.min(y_true) + K.epsilon(),
        y_true + K.epsilon())
    y_pred = K.switch(
        K.min(y_pred) < 0, y_pred - K.min(y_pred) + K.epsilon(),
        y_pred + K.epsilon())
    p_n = y_true / K.max(y_true, axis=1, keepdims=True)
    q_n = y_pred / K.max(y_pred, axis=1, keepdims=True)

    # MSE plus a KL-style penalty (and its complement) on the max-normalized
    # profiles; the 1.001 offsets keep both logarithms finite
    return K.mean(K.square(y_true - y_pred),
                  axis=-1) + 0.5 * (K.sum(p_n * K.log(p_n / q_n)) + K.sum(
                      (1.001 - p_n) * K.log((1.001 - p_n) / (1.001 - q_n))))
Example #12
def add_loss(model, W):
    inputs = model.inputs[0]
    abnormal = model.inputs[1]
    # abnormal = K.print_tensor(abnormal, message='abnormal = ')
    outputs = model.outputs[0]
    z_mean = model.get_layer('z_mean').output
    z_log_var = model.get_layer('z_log_var').output

    beta = K.sum(1.0 - abnormal, axis=-1, keepdims=True) / W
    # beta = K.print_tensor(beta, message='beta = ')
    reconstruction_loss = mean_squared_error(inputs, outputs)
    reconstruction_loss *= W
    kl_loss = 1 + z_log_var - beta * K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    model.add_loss(vae_loss)
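Because the objective is registered with `model.add_loss`, `compile` takes no `loss` argument and `fit` needs no targets. A minimal usage sketch (the optimizer choice, the `W=120` value, and the array names are assumptions):

add_loss(model, W=120)           # W is assumed to be the input feature width
model.compile(optimizer='adam')
model.fit([x_train, abnormal_train], epochs=10, batch_size=32)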
Example #13
File: tsne.py  Project: tobytoy/MotionGAN
 def tsne(P, activations):
     #     d = K.shape(activations)[1]
     d = 2  # assumed output dimensionality; the excerpt leaves `d` undefined
     n = 5000  # assumed batch size, needed as a Python int for np.eye(n) below
     v = d - 1.
     eps = K.variable(
         10e-15
     )  # needs to be at least 10e-8 to get anything after Q /= K.sum(Q)
     sum_act = K.sum(K.square(activations), axis=1)
     Q = K.reshape(sum_act, [-1, 1]) + -2 * K.dot(activations,
                                                  K.transpose(activations))
     Q = (sum_act + Q) / v
     Q = K.pow(1 + Q, -(v + 1) / 2)
     Q *= K.variable(1 - np.eye(n))
     Q /= K.sum(Q)
     Q = K.maximum(Q, eps)
     C = K.log((P + eps) / (Q + eps))
     C = K.sum(P * C)
     return C
Example #14
def rmse(y_true, y_pred):
    return backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))
Example #15
def content_loss(content, gen):
    assert K.ndim(content) == 3
    assert K.ndim(gen) == 3
    # Squared Euclidean distance
    return K.sum(K.square(gen - content))
Example #16
 def __call__(self, p):
     p *= K.cast(p >= 0., K.floatx())
     return p / (K.epsilon() +
                 K.sqrt(K.sum(K.square(p), axis=self.axis, keepdims=True)))
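This `__call__` implements a Keras weight constraint: negative entries are zeroed, then the tensor is L2-normalized along `self.axis`. A minimal usage sketch, assuming the enclosing class is named `NonNegUnitNorm` (a hypothetical name) and takes `axis` in its constructor:

from keras.layers import Dense

layer = Dense(64, kernel_constraint=NonNegUnitNorm(axis=0))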
Example #17
File: edm.py  Project: tobytoy/MotionGAN
def edm(x, y=None):
    with K.name_scope('edm'):
        y = x if y is None else y
        x = K.expand_dims(x, axis=1)
        y = K.expand_dims(y, axis=2)
        return K.sqrt(K.sum(K.square(x - y), axis=-1) + K.epsilon())
Example #18
File: edm.py  Project: tobytoy/MotionGAN
def edm_loss(y_true, y_pred):
    return K.mean(K.sum(K.square(edm(y_true) - edm(y_pred)), axis=[1, 2]))
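`edm_loss` compares the pairwise-distance matrices of prediction and target, so it is invariant to global translation and rotation of the point sets. A minimal usage sketch:

model.compile(optimizer='adam', loss=edm_loss)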
Example #19
def normMSE(y_true, y_pred):
    # Note: the normalized tensors below are computed but never used, so this
    # loss reduces to plain MSE over the whole batch
    y_true2 = K.l2_normalize(y_true + K.epsilon(), axis=-1)
    y_pred2 = K.l2_normalize(y_pred + K.epsilon(), axis=-1)
    mse = K.mean(K.square(y_true - y_pred))
    return mse