Example #1
# Imports assumed: plain NumPy plus SciPy's logsumexp; the original may
# instead use jax.numpy and jax.scipy.special.logsumexp.
import numpy as np
from scipy.special import logsumexp

def predict(params, inputs):
  # params is a list of (weights, biases) pairs, one per layer.
  activations = inputs
  for w, b in params[:-1]:
    outputs = np.dot(activations, w) + b
    activations = np.tanh(outputs)  # tanh nonlinearity on hidden layers

  final_w, final_b = params[-1]
  logits = np.dot(activations, final_w) + final_b
  # Log-softmax: subtracting logsumexp normalizes the logits in log space.
  return logits - logsumexp(logits, axis=1, keepdims=True)
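A minimal usage sketch, assuming a small two-layer network; the layer sizes, initialization, and batch shape below are illustrative, not from the original source.

import numpy as np

rng = np.random.default_rng(0)
# Hypothetical layer sizes: 784 inputs -> 128 hidden units -> 10 classes.
sizes = [784, 128, 10]
params = [(rng.normal(scale=0.1, size=(m, n)), np.zeros(n))
          for m, n in zip(sizes[:-1], sizes[1:])]

batch = rng.normal(size=(32, 784))    # a batch of 32 inputs
log_probs = predict(params, batch)    # shape (32, 10), log-probabilities
print(np.exp(log_probs).sum(axis=1))  # each row sums to ~1.0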
Example #2
import numpy as np
from scipy.special import logsumexp  # imports assumed, as in Example #1

def predict(params, inputs):
    for w, b in params:
        outputs = np.dot(inputs, w) + b
        inputs = np.tanh(outputs)  # the final tanh is computed but unused
    # Log-softmax over the last layer's pre-activation outputs (the logits).
    return outputs - logsumexp(outputs, axis=1, keepdims=True)
Example #3
from scipy.special import logsumexp  # import assumed

def logsoftmax(x, axis=-1):
    """Apply log softmax to an array of logits, log-normalizing along an axis."""
    return x - logsumexp(x, axis, keepdims=True)
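A small sanity check, purely illustrative: exponentiating the log-softmax output gives probabilities that sum to one along the normalized axis.

import numpy as np

x = np.array([[1.0, 2.0, 3.0],
              [0.5, 0.5, 0.5]])
log_p = logsoftmax(x)               # normalizes along the last axis
print(np.exp(log_p).sum(axis=-1))   # -> [1. 1.]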
Example #4
import numpy as np
from scipy.special import logsumexp  # import assumed for one_sided_exp

def lax_fun(array_to_reduce):
    # axis, keepdims, and lsp_misc come from the enclosing (test) scope.
    return lsp_misc.logsumexp(array_to_reduce, axis, keepdims=keepdims)

def one_sided_exp(w_F):
    # Zwanzig/EXP estimate: DeltaF = -ln(mean(exp(-w_F))), computed stably.
    return -(logsumexp(-w_F) - np.log(len(w_F)))
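An illustrative call with synthetic forward work values in units of kT; the Gaussian work distribution is hypothetical, chosen only to exercise the estimator. For w_F drawn from N(mu, sigma^2), the exponential average tends to mu - sigma^2/2.

import numpy as np

rng = np.random.default_rng(1)
w_F = rng.normal(loc=5.0, scale=1.0, size=10_000)  # synthetic work values
print(one_sided_exp(w_F))  # approximately 5.0 - 0.5 = 4.5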