Example #1
import tensorflow as tf
import capslayer as cl  # https://github.com/naturomics/CapsLayer


def M_step(log_R, log_activation, vote, lambda_val=0.01):
    # Weight each routing assignment by its input capsule's activation.
    log_R = log_R + log_activation

    # Total assignment mass per output capsule, and the assignments
    # normalized over the input-capsule axis.
    R_sum_i = cl.reduce_sum(tf.exp(log_R), axis=-3, keepdims=True)
    log_normalized_R = log_R - \
        tf.reduce_logsumexp(log_R, axis=-3, keepdims=True)

    # Gaussian mean: assignment-weighted average of the votes.
    pose = cl.reduce_sum(vote * tf.exp(log_normalized_R),
                         axis=-3,
                         keepdims=True)
    # Gaussian log-variance, accumulated in log space for stability.
    log_var = tf.reduce_logsumexp(log_normalized_R +
                                  cl.log(tf.square(vote - pose)),
                                  axis=-3,
                                  keepdims=True)
    # Caution: creating beta_v/beta_a inside M_step builds fresh variables
    # on every call; Example #3 avoids this by holding them on `self`.
    shape = [1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1]
    beta_v = tf.Variable(initial_value=tf.keras.initializers.TruncatedNormal(
        mean=15., stddev=3.)(shape),
                         name="beta_v",
                         shape=shape)
    # Description cost of activating each output capsule, per pose dimension.
    cost = R_sum_i * (beta_v + 0.5 * log_var)

    beta_a = tf.Variable(initial_value=tf.keras.initializers.TruncatedNormal(
        mean=100.0, stddev=10.)(shape),
                         name="beta_a",
                         shape=shape)
    cost_sum_h = cl.reduce_sum(cost, axis=-1, keepdims=True)
    # Output activation: sigmoid of the scaled benefit of activating.
    logit = lambda_val * (beta_a - cost_sum_h)
    log_activation = tf.math.log_sigmoid(logit)

    return (pose, log_var, log_activation)
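
For reference, these updates match, up to notation, the M-step of EM routing (Hinton, Sabour & Frosst, "Matrix capsules with EM routing", ICLR 2018), with R_ij here already scaled by the input activations:

    \mu_j = \frac{\sum_i R_{ij} V_{ij}}{\sum_i R_{ij}}, \qquad
    (\sigma_j^h)^2 = \frac{\sum_i R_{ij} (V_{ij}^h - \mu_j^h)^2}{\sum_i R_{ij}}

    \mathrm{cost}_j^h = \Big(\beta_v + \tfrac{1}{2}\log (\sigma_j^h)^2\Big) \sum_i R_{ij}, \qquad
    a_j = \mathrm{sigmoid}\Big(\lambda\big(\beta_a - \sum_h \mathrm{cost}_j^h\big)\Big)
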
Example #2
import numpy as np
import tensorflow as tf
import capslayer as cl  # https://github.com/naturomics/CapsLayer


def E_step(pose, log_var, log_activation, vote):
    # Squared deviation of each vote, scaled by the variance
    # sigma^2 = exp(log_var); the 1/2 in the Gaussian exponent comes
    # from the -0.5 factor below, so divide by exp(log_var) only.
    normalized_vote = cl.divide(tf.square(vote - pose), tf.exp(log_var))
    # Diagonal-Gaussian log-density, summed over the pose dimensions.
    log_probs = normalized_vote + cl.log(2 * np.pi) + log_var
    log_probs = -0.5 * cl.reduce_sum(log_probs, axis=-1, keepdims=True)
    # Posterior responsibility: weight each vote's likelihood by its
    # output capsule's activation, then normalize over the
    # output-capsule axis.
    log_activation_logit = log_activation + log_probs
    log_R = log_activation_logit - tf.reduce_logsumexp(log_activation_logit, axis=-2, keepdims=True)
    return log_R
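
Together, the two functions above make up one EM routing pass. Below is a minimal sketch of the alternation, assuming a vote layout of [..., num_in, num_out, atoms] (so axis -3 indexes input capsules and axis -2 output capsules, matching the reductions above) and input activations of shape [..., num_in, 1, 1]; the name em_routing, the uniform initialization, and num_iters=3 are illustrative choices, not part of the examples:

def em_routing(vote, log_activation_in, num_iters=3):
    # Uniform initial assignments: exp(log_R) sums to 1 over the
    # output-capsule axis (-2).
    num_out = tf.cast(tf.shape(vote)[-2], tf.float32)
    log_R = tf.zeros_like(vote[..., :1]) - tf.math.log(num_out)
    for i in range(num_iters):
        pose, log_var, log_activation = M_step(log_R, log_activation_in, vote)
        if i < num_iters - 1:
            log_R = E_step(pose, log_var, log_activation, vote)
    return pose, tf.exp(log_activation)

With Example #1's M_step this rebuilds beta_v/beta_a on each iteration; the method variant in Example #3 is the better fit for such a loop.
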
Example #3
    def M_step(self, log_R, log_activation, vote, lambda_val=0.01):
        # Method variant of Example #1: self.beta_v and self.beta_a are
        # created once on the owning object rather than on every call.
        log_R = log_R + log_activation

        R_sum_i = cl.reduce_sum(tf.exp(log_R), axis=-3, keepdims=True)
        log_normalized_R = log_R - \
            tf.reduce_logsumexp(log_R, axis=-3, keepdims=True)

        pose = cl.reduce_sum(vote * tf.exp(log_normalized_R),
                             axis=-3,
                             keepdims=True)
        log_var = tf.reduce_logsumexp(log_normalized_R +
                                      cl.log(tf.square(vote - pose)),
                                      axis=-3,
                                      keepdims=True)
        cost = R_sum_i * (self.beta_v + 0.5 * log_var)
        cost_sum_h = cl.reduce_sum(cost, axis=-1, keepdims=True)
        logit = lambda_val * (self.beta_a - cost_sum_h)
        log_activation = tf.math.log_sigmoid(logit)

        return (pose, log_var, log_activation)
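
Where self.beta_v and self.beta_a come from is not shown above. One possibility is a Keras layer whose build() creates them once; this is a minimal sketch under that assumption (EMRouting, num_out_caps, and the rank-4 vote layout [batch, num_in, num_out, atoms] are illustrative, not from the examples):

import tensorflow as tf


class EMRouting(tf.keras.layers.Layer):
    def __init__(self, num_out_caps, **kwargs):
        super().__init__(**kwargs)
        self.num_out_caps = num_out_caps

    def build(self, input_shape):
        # Broadcasts against cost, which has shape [batch, 1, num_out, atoms].
        shape = [1, 1, self.num_out_caps, 1]
        self.beta_v = self.add_weight(
            name="beta_v", shape=shape,
            initializer=tf.keras.initializers.TruncatedNormal(mean=15., stddev=3.))
        self.beta_a = self.add_weight(
            name="beta_a", shape=shape,
            initializer=tf.keras.initializers.TruncatedNormal(mean=100., stddev=10.))

    # Example #3's M_step (and an E_step) would be added here as methods,
    # with call() alternating them as in the loop sketched after Example #2.
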
Example #4
import tensorflow as tf  # TensorFlow 1.x API (tf.get_variable, tf.log_sigmoid)
import capslayer as cl  # https://github.com/naturomics/CapsLayer


def M_step(log_R, log_activation, vote, lambda_val=0.01):
    # Weight each routing assignment by its input capsule's activation.
    log_R = log_R + log_activation

    R_sum_i = cl.reduce_sum(tf.exp(log_R), axis=-3, keepdims=True)
    log_normalized_R = log_R - tf.reduce_logsumexp(log_R, axis=-3, keepdims=True)

    pose = cl.reduce_sum(vote * tf.exp(log_normalized_R), axis=-3, keepdims=True)
    log_var = tf.reduce_logsumexp(log_normalized_R + cl.log(tf.square(vote - pose)), axis=-3, keepdims=True)

    # With tf.get_variable, repeated calls need a variable scope with
    # reuse enabled (see the note after this example).
    beta_v = tf.get_variable('beta_v',
                             shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
                             initializer=tf.truncated_normal_initializer(mean=15., stddev=3.))
    # Description cost of activating each output capsule, per pose dimension.
    cost = R_sum_i * (beta_v + 0.5 * log_var)

    beta_a = tf.get_variable('beta_a',
                             shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
                             initializer=tf.truncated_normal_initializer(mean=100.0, stddev=10.))
    cost_sum_h = cl.reduce_sum(cost, axis=-1, keepdims=True)
    # Output activation: sigmoid of the scaled benefit of activating.
    logit = lambda_val * (beta_a - cost_sum_h)
    log_activation = tf.log_sigmoid(logit)

    return (pose, log_var, log_activation)
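
One caveat with this TF1 version: a second call to M_step would raise a ValueError from tf.get_variable because 'beta_v'/'beta_a' already exist. A minimal sketch of the usual fix, reusing the loop shape from the sketch after Example #2 (num_iters, log_R, and log_activation_in are illustrative):

with tf.variable_scope('em_routing', reuse=tf.AUTO_REUSE):
    # AUTO_REUSE creates beta_v/beta_a on the first iteration and
    # reuses the same variables on every later one.
    for i in range(num_iters):
        pose, log_var, log_activation = M_step(log_R, log_activation_in, vote)
        if i < num_iters - 1:
            log_R = E_step(pose, log_var, log_activation, vote)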