import numpy as np


def softmax(x):
    """
    Softmax function: converts a vector of scores into a probability
    distribution.
    Returns an ndarray of values in [0.0, 1.0] that sum to 1 along the
    last axis.

    >>> x = np.array([0])
    >>> softmax(x)
    array([1.])
    >>> x = np.array([0, 0])
    >>> softmax(x)
    array([0.5, 0.5])
    >>> x = np.array([[0, 0], [0, 0]])
    >>> softmax(x)
    array([[0.5, 0.5],
           [0.5, 0.5]])
    """

    if x.ndim == 2:
        # Subtract the row-wise max so np.exp cannot overflow; the shift
        # cancels out in the normalization.
        x = x - x.max(axis=1, keepdims=True)
        x = np.exp(x)
        x = x / x.sum(axis=1, keepdims=True)
    elif x.ndim == 1:
        # Same stabilization for the 1-D case.
        x = x - np.max(x)
        x = np.exp(x) / np.sum(np.exp(x))

    return x
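# A quick check of the max-subtraction trick (added; not in the original
# snippet): naive np.exp would overflow on large scores, while the shifted
# version still yields a clean distribution.
#
# >>> softmax(np.array([1000.0, 1000.0]))
# array([0.5, 0.5])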
Example #2
import numpy as np


def softmax(x):
    if x.ndim == 2:
        x = x - x.max(axis=1, keepdims=True)
        x = np.exp(x)
        x /= x.sum(axis=1, keepdims=True)
    elif x.ndim == 1:
        x = x - np.max(x)
        x = np.exp(x) / np.sum(np.exp(x))
    return x
    # Method of a sigmoid-with-loss layer (the enclosing class is not shown
    # in this snippet): apply the sigmoid, then score the stacked
    # probabilities [P(y=0), P(y=1)] against the targets with cross-entropy.
    def forward(self, x, t):
        self.t = t
        self.y = 1 / (1 + np.exp(-x))

        self.loss = cross_entropy_error(np.c_[1 - self.y, self.y], self.t)

        return self.loss
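# The forward method above calls a cross_entropy_error helper that this
# snippet does not include. A minimal sketch, assuming row-wise class
# probabilities in y and integer (or one-hot) targets in t:
def cross_entropy_error(y, t):
    if y.ndim == 1:
        y = y.reshape(1, -1)
        t = t.reshape(1, -1)
    if t.size == y.size:  # one-hot targets -> class indices
        t = t.argmax(axis=1)
    batch_size = y.shape[0]
    # Average negative log-likelihood; 1e-7 guards against log(0).
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size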
def sigmoid(x):
    """
    Return the sigmoid of x.
    One of the activation functions.

    >>> sigmoid(0)
    0.5
    """
    return 1 / (1 + np.exp(-x))
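# Caveat (added; not part of the original example): for large negative x,
# np.exp(-x) overflows and NumPy emits a RuntimeWarning, although the result
# still rounds to the correct 0.0. A warning-free sketch for array inputs
# (the helper name sigmoid_stable is ours):
def sigmoid_stable(x):
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    # Only evaluate exp where its argument is non-positive, so it never overflows.
    out[pos] = 1 / (1 + np.exp(-x[pos]))
    exp_x = np.exp(x[~pos])
    out[~pos] = exp_x / (1 + exp_x)
    return out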
Example #5
    # Training-loop method of a trainer class (the class definition and its
    # get_batch method are not shown in this snippet).
    def fit(self,
            xs,
            ts,
            max_epoch=10,
            batch_size=20,
            time_size=35,
            max_grad=None,
            eval_interval=20):
        data_size = len(xs)
        max_iters = data_size // (batch_size * time_size)
        self.time_idx = 0
        self.ppl_list = []
        self.eval_interval = eval_interval
        model, optimizer = self.model, self.optimizer
        total_loss = 0
        loss_count = 0

        start_time = time.time()
        for _ in range(max_epoch):
            for iters in range(max_iters):
                batch_x, batch_t = self.get_batch(xs, ts, batch_size,
                                                  time_size)

                # Compute the gradients and update the parameters
                loss = model.forward(batch_x, batch_t)
                model.backward()
                params, grads = remove_duplicate(model.params,
                                                 model.grads)  # merge shared weights into a single entry
                if max_grad is not None:
                    clip_grads(grads, max_grad)
                optimizer.update(params, grads)
                total_loss += loss
                loss_count += 1

                # Evaluate perplexity
                if (eval_interval
                        is not None) and (iters % eval_interval) == 0:
                    ppl = np.exp(total_loss / loss_count)
                    elapsed_time = time.time() - start_time
                    print(
                        '| epoch %d |  iter %d / %d | time %d[s] | perplexity %.2f'
                        % (self.current_epoch + 1, iters + 1, max_iters,
                           elapsed_time, ppl))
                    self.ppl_list.append(float(ppl))
                    total_loss, loss_count = 0, 0

            self.current_epoch += 1
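# The loop above relies on clip_grads and remove_duplicate helpers that the
# snippet does not define. A minimal sketch of the clipping step, assuming
# clipping by global L2 norm (consistent with the clip_grads(grads, max_grad)
# call, but not confirmed by the snippet):
def clip_grads(grads, max_norm):
    # Rescale every gradient in place when their combined L2 norm exceeds max_norm.
    total_norm = np.sqrt(sum((g ** 2).sum() for g in grads))
    rate = max_norm / (total_norm + 1e-6)
    if rate < 1:
        for g in grads:
            g *= rate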
Example #6
    # forward method of a sigmoid layer (enclosing class not shown); the
    # output is cached for reuse in the backward pass.
    def forward(self, x):
        out = 1 / (1 + np.exp(-x))
        self.out = out
        return out
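    # The matching backward pass (not in the original example) would use the
    # cached output and the sigmoid derivative y * (1 - y); a sketch:
    def backward(self, dout):
        return dout * self.out * (1.0 - self.out)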
Example #7
def sigmoid(x):
    return 1 / (1 + np.exp(-x))