Example No. 1
    def fit(self,
            x,
            t,
            max_epoch=10,
            batch_size=32,
            max_grad=None,
            eval_interval=20):
        data_size = len(x)
        max_iters = data_size // batch_size
        self.eval_interval = eval_interval
        model, optimizer = self.model, self.optimizer
        total_loss = 0
        loss_count = 0

        start_time = time.time()
        for epoch in range(max_epoch):
            # Shuffle the training data each epoch
            idx = numpy.random.permutation(numpy.arange(data_size))
            x = x[idx]
            t = t[idx]

            for iters in range(max_iters):
                batch_x = x[iters * batch_size:(iters + 1) * batch_size]
                batch_t = t[iters * batch_size:(iters + 1) * batch_size]

                # Compute the gradients and update the parameters
                loss = model.forward(batch_x, batch_t)
                model.backward()
                params, grads = remove_duplicate(model.params,
                                                 model.grads)  # consolidate shared weights into one
                if max_grad is not None:
                    clip_grads(grads, max_grad)
                optimizer.update(params, grads)
                total_loss += loss
                loss_count += 1

                # Evaluation: periodically report the running average loss
                if (eval_interval is not None) and (iters % eval_interval) == 0:
                    avg_loss = total_loss / loss_count
                    elapsed_time = time.time() - start_time
                    print(
                        '| epoch %d |  iter %d / %d | time %d[s] | loss %.2f' %
                        (self.current_epoch + 1, iters + 1, max_iters,
                         elapsed_time, avg_loss))
                    self.loss_list.append(float(avg_loss))
                    total_loss, loss_count = 0, 0

            self.current_epoch += 1
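
Example 1 (and the examples below) calls clip_grads without defining it. A minimal, self-contained sketch of what it plausibly does, assuming the common scheme of rescaling all gradients in place so their combined L2 norm never exceeds max_grad; the actual helper may differ:

import numpy as np

def clip_grads(grads, max_norm):
    # Combined L2 norm over every gradient array
    total_norm = np.sqrt(sum((g ** 2).sum() for g in grads))
    rate = max_norm / (total_norm + 1e-6)
    if rate < 1:  # only ever shrink the gradients, never enlarge them
        for grad in grads:
            grad *= rate

Scaling every gradient by one global rate bounds the size of the update while preserving its direction, which is why it is a standard guard against exploding gradients.
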
Example No. 2
  def fit(self, x, t, max_epoch=10, batch_size=32, max_grad=None, eval_interval=20):

    data_size = len(x)
    max_iters = data_size // batch_size
    self.eval_interval = eval_interval
    total_loss = 0
    loss_count = 0

    model, optimizer = self.model, self.optimizer

    start_time = time.time()

    for epoch in range(max_epoch):
      # shuffle data
      idx = np.random.permutation(data_size)
      x = x[idx]
      t = t[idx]

      for iters in range(max_iters):
        batch_x = x[iters*batch_size:(iters+1)*batch_size]
        batch_t = t[iters*batch_size:(iters+1)*batch_size]

        # Compute the gradients and update the parameters
        loss = model.forward(batch_x, batch_t)
        model.backward()

        # Consolidate shared weights into a single entry
        # (for models such as word2vec, this removes duplicated weight references)
        params, grads = remove_duplicate(model.params, model.grads)

        # Guard against exploding gradients
        if max_grad is not None:
          clip_grads(grads, max_grad)
          
        optimizer.update(params, grads)

        total_loss += loss
        loss_count += 1

        # Periodically report the running average loss
        if (eval_interval is not None) and (iters % eval_interval) == 0:
          avg_loss = total_loss / loss_count
          elapsed_time = time.time() - start_time
          print("| epoch %d | iter %d / %d | time %d[s] | loss %.2f" % (epoch+1, iters+1, max_iters, elapsed_time, avg_loss))
          self.loss_list.append(float(avg_loss))
          total_loss, loss_count = 0, 0

      self.current_epoch += 1
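
remove_duplicate is likewise called but never shown. A simplified sketch, under the assumption that "shared weights" means several entries in params reference the same array object, so their gradients must be summed and the duplicates dropped, letting each shared weight be updated exactly once (a fuller version might also detect transposed copies of the same matrix, as in tied embedding layers; that case is omitted here):

def remove_duplicate(params, grads):
    # Work on copies so the model's own lists are left untouched
    params, grads = params[:], grads[:]
    while True:
        merged = False
        for i in range(len(params) - 1):
            for j in range(i + 1, len(params)):
                if params[i] is params[j]:  # same array object => shared weight
                    grads[i] = grads[i] + grads[j]  # accumulate its gradient
                    params.pop(j)
                    grads.pop(j)
                    merged = True
                    break
            if merged:
                break
        if not merged:
            break
    return params, grads
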
Example No. 3
    def fit(self,
            xs,
            ts,
            max_epoch=10,
            batch_size=20,
            time_size=35,
            max_grad=None,
            eval_interval=20):
        data_size = len(xs)
        max_iters = data_size // (batch_size * time_size)
        self.time_idx = 0
        self.ppl_list = []
        self.eval_interval = eval_interval
        model, optimizer = self.model, self.optimizer
        total_loss = 0
        loss_count = 0

        start_time = time.time()
        for epoch in range(max_epoch):
            for iters in range(max_iters):
                batch_x, batch_t = self.get_batch(xs, ts, batch_size,
                                                  time_size)

                # Compute the gradients and update the parameters
                loss = model.forward(batch_x, batch_t)
                model.backward()
                params, grads = remove_duplicate(model.params,
                                                 model.grads)  # consolidate shared weights into one
                if max_grad is not None:
                    clip_grads(grads, max_grad)
                optimizer.update(params, grads)
                total_loss += loss
                loss_count += 1

                # Evaluate perplexity: exp of the average cross-entropy loss
                if (eval_interval is not None) and (iters % eval_interval) == 0:
                    ppl = np.exp(total_loss / loss_count)
                    elapsed_time = time.time() - start_time
                    print(
                        '| epoch %d |  iter %d / %d | time %d[s] | perplexity %.2f'
                        % (self.current_epoch + 1, iters + 1, max_iters,
                           elapsed_time, ppl))
                    self.ppl_list.append(float(ppl))
                    total_loss, loss_count = 0, 0

            self.current_epoch += 1
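
Example 3 depends on self.get_batch to slice the corpus into minibatches for truncated BPTT. A plausible sketch of that method, inferred only from how it is used above (self.time_idx persists across calls so consecutive batches continue where the previous ones stopped; the real implementation may differ):

import numpy as np

def get_batch(self, xs, ts, batch_size, time_size):
    batch_x = np.empty((batch_size, time_size), dtype='i')
    batch_t = np.empty((batch_size, time_size), dtype='i')

    data_size = len(xs)
    jump = data_size // batch_size  # each row reads from its own corpus offset
    offsets = [i * jump for i in range(batch_size)]

    for step in range(time_size):
        for i, offset in enumerate(offsets):
            batch_x[i, step] = xs[(offset + self.time_idx) % data_size]
            batch_t[i, step] = ts[(offset + self.time_idx) % data_size]
        self.time_idx += 1  # advance the shared cursor one step per column
    return batch_x, batch_t
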