Code example #1
File: ddpg.py Project: danielhers/cnn
    def learn(self, batch_size):
        exps = self.memory.sample(batch_size)
        obss, actions, rewards, obs_nexts, dones = self._process(exps)

        # Update critic
        dy.renew_cg()
        target_actions = self.actor_target(obs_nexts, batched=True)
        target_values = self.critic_target(dy.concatenate([dy.inputTensor(obs_nexts, batched=True), target_actions]),
                                           batched=True)
        target_values = rewards + 0.99 * target_values.npvalue() * (1 - dones)

        dy.renew_cg()
        values = self.critic(np.concatenate([obss, actions]), batched=True)
        loss = dy.mean_batches((values - dy.inputTensor(target_values, batched=True)) ** 2)
        loss_value_critic = loss.npvalue()
        loss.backward()
        self.trainer_critic.update()

        # update actor
        dy.renew_cg()
        actions = self.actor(obss, batched=True)
        obs_and_actions = dy.concatenate([dy.inputTensor(obss, batched=True), actions])
        loss = -dy.mean_batches(self.critic(obs_and_actions, batched=True))
        loss_value_actor = loss.npvalue()
        loss.backward()
        self.trainer_actor.update()

        self.noise_stddev = (
                    self.noise_stddev - self.noise_stddev_decrease) if self.noise_stddev > self.noise_stddev_lower else self.noise_stddev_lower

        self.actor_target.update(self.actor, soft=True)
        self.critic_target.update(self.critic, soft=True)

        return loss_value_actor + loss_value_critic
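The critic update in this example reduces a batch of squared TD errors to one scalar with dy.mean_batches before calling backward(). Below is a minimal, self-contained sketch of just that reduction, with toy numbers rather than the DDPG code's real tensors:

import numpy as np
import dynet as dy

dy.renew_cg()
# One critic output and one TD target per batch element (batch of 3)
values = dy.inputTensor(np.array([0.5, 1.2, -0.3]), batched=True)
targets = dy.inputTensor(np.array([1.0, 1.0, 0.0]), batched=True)
mse = dy.mean_batches((values - targets) ** 2)  # scalar: mean squared error over the batch
print(mse.value())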
Code example #2
File: train.py Project: kiennguyen94/baseline
 def _loss(outputs, labels):
     losses = [
         dy.pickneglogsoftmax_batch(out, label)
         for out, label in zip(outputs, labels)
     ]
     loss = dy.mean_batches(dy.average(losses))
     return loss
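For reference, a small stand-alone sketch of what this helper computes (illustrative shapes, not taken from the project above): dy.pickneglogsoftmax_batch returns one negative log-likelihood per batch element, and dy.mean_batches averages them into a single scalar.

import numpy as np
import dynet as dy

dy.renew_cg()
# Batched logits for a 5-way classifier over a minibatch of 3 examples
logits = dy.inputTensor(np.random.randn(5, 3), batched=True)
labels = [0, 2, 4]                                # one gold label per batch element
nll = dy.pickneglogsoftmax_batch(logits, labels)  # per-example NLL, still batched
loss = dy.mean_batches(nll)                       # scalar: average NLL over the minibatch
print(loss.value())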
Code example #3
 def compose(
         self, embeds: Union[dy.Expression,
                             List[dy.Expression]]) -> dy.Expression:
     if type(embeds) != list:
         return dy.mean_batches(embeds)
     else:
         return dy.average(embeds)
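The compose() method above relies on a distinction worth spelling out: dy.mean_batches averages a single batched expression over its batch dimension, while dy.average averages a Python list of expressions element-wise. A minimal sketch with toy data (not from the original project):

import numpy as np
import dynet as dy

dy.renew_cg()
# One batched expression: four 3-dimensional embeddings stacked along the batch axis
batched = dy.inputTensor(np.arange(12.0).reshape(3, 4), batched=True)
print(dy.mean_batches(batched).npvalue())   # a single 3-vector
# A plain list of four separate 3-dimensional expressions
embeds = [dy.inputTensor(np.arange(3.0) + i) for i in range(4)]
print(dy.average(embeds).npvalue())         # also a single 3-vector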
Code example #4
 def on_calc_additional_loss(self, reward):
     if not self.learn_segmentation:
         return None
     ret = LossBuilder()
     if self.length_prior_alpha > 0:
         reward += self.segment_length_prior * self.length_prior_alpha
     reward = dy.cdiv(reward - dy.mean_batches(reward),
                      dy.std_batches(reward))
     # Baseline Loss
     if self.use_baseline:
         baseline_loss = []
         for i, baseline in enumerate(self.bs):
             baseline_loss.append(dy.squared_distance(reward, baseline))
         ret.add_loss("Baseline", dy.esum(baseline_loss))
     # Reinforce Loss
     lmbd = self.lmbd.get_value(self.warmup_counter)
     if lmbd > 0.0:
         reinforce_loss = []
         # Calculating the loss of the baseline and reinforce
         for i in range(len(self.segment_decisions)):
             ll = dy.pick_batch(self.segment_logsoftmaxes[i],
                                self.segment_decisions[i])
             if self.use_baseline:
                 r_i = reward - self.bs[i]
             else:
                 r_i = reward
             reinforce_loss.append(dy.logistic(r_i) * ll)
         ret.add_loss("Reinforce", -dy.esum(reinforce_loss) * lmbd)
     # Total Loss
     return ret
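The reward normalization used here (and again in code examples #6 and #13) is a plain batch-wise z-score. A small sketch with assumed values, not from the original model:

import numpy as np
import dynet as dy

dy.renew_cg()
# One scalar reward per batch element (batch of 6)
reward = dy.inputTensor(np.array([1.0, 2.0, 0.5, 3.0, 2.5, 1.5]), batched=True)
normalized = dy.cdiv(reward - dy.mean_batches(reward), dy.std_batches(reward))
print(normalized.npvalue())  # roughly zero mean and unit variance across the batch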
Code example #5
File: train.py Project: tanthml/baseline
    def _step(self, loader, update, log, reporting_fns, verbose=None):
        steps = len(loader)
        pg = create_progress_bar(steps)
        cm = ConfusionMatrix(self.labels)
        epoch_loss = 0
        epoch_div = 0

        for batch_dict in pg(loader):
            dy.renew_cg()
            inputs = self.model.make_input(batch_dict)
            ys = inputs.pop('y')
            preds = self.model.forward(inputs)
            losses = self.model.loss(preds, ys)
            loss = dy.mean_batches(losses)
            batchsz = self._get_batchsz(batch_dict)
            lossv = loss.npvalue().item() * batchsz
            epoch_loss += lossv
            epoch_div += batchsz
            _add_to_cm(cm, ys, preds.npvalue())
            update(loss)
            log(self.optimizer.global_step, lossv, batchsz, reporting_fns)

        metrics = cm.get_all_metrics()
        metrics['avg_loss'] = epoch_loss / float(epoch_div)
        verbose_output(verbose, cm)
        return metrics
Code example #6
  def on_calc_additional_loss(self, translator_loss):
    if not self.learn_segmentation or self.segment_decisions is None:
      return None
    reward = -translator_loss["mle"]
    if not self.log_reward:
      reward = dy.exp(reward)
    reward = dy.nobackprop(reward)

    # Make sure that reward is not a scalar, but one value per batch item
    assert reward.dim()[1] == len(self.src_sent)
    # Mask
    enc_mask = self.enc_mask.get_active_one_mask().transpose() if self.enc_mask is not None else None
    # Compose the loss
    ret = LossBuilder()
    ## Length prior
    alpha = self.length_prior_alpha.value() if self.length_prior_alpha is not None else 0
    if alpha > 0:
      reward += self.segment_length_prior * alpha
    # reward z-score normalization
    if self.z_normalization:
      reward = dy.cdiv(reward-dy.mean_batches(reward), dy.std_batches(reward) + EPS)
    ## Baseline Loss
    if self.use_baseline:
      baseline_loss = []
      for i, baseline in enumerate(self.bs):
        loss = dy.squared_distance(reward, baseline)
        if enc_mask is not None:
          loss = dy.cmult(dy.inputTensor(enc_mask[i], batched=True), loss)
        baseline_loss.append(loss)

      ret.add_loss("Baseline", dy.esum(baseline_loss))

    if self.print_sample:
      print(dy.exp(self.segment_logsoftmaxes[i]).npvalue().transpose()[0])
    ## Reinforce Loss
    lmbd = self.lmbd.value()
    if lmbd > 0.0:
      reinforce_loss = []
      # Calculating the loss of the baseline and reinforce
      for i in range(len(self.segment_decisions)):
        ll = dy.pick_batch(self.segment_logsoftmaxes[i], self.segment_decisions[i])
        if self.use_baseline:
          r_i = reward - dy.nobackprop(self.bs[i])
        else:
          r_i = reward
        if enc_mask is not None:
          ll = dy.cmult(dy.inputTensor(enc_mask[i], batched=True), ll)
        reinforce_loss.append(r_i * -ll)
      loss = dy.esum(reinforce_loss) * lmbd
      ret.add_loss("Reinforce", loss)
    if self.confidence_penalty:
      ls_loss = self.confidence_penalty(self.segment_logsoftmaxes, enc_mask)
      ret.add_loss("Confidence Penalty", ls_loss)
    # Total Loss
    return ret
Code example #7
File: ddpg.py Project: zhangfanTJU/dynet
    def learn(self, batch_size):
        exps = self.memory.sample(batch_size)
        obss, actions, rewards, obs_nexts, dones = self._process(exps)

        # Update critic
        dy.renew_cg()
        target_actions = self.actor_target(obs_nexts, batched=True)
        target_values = self.critic_target(dy.concatenate(
            [dy.inputTensor(obs_nexts, batched=True), target_actions]),
                                           batched=True)
        target_values = rewards + 0.99 * target_values.npvalue() * (1 - dones)

        dy.renew_cg()
        values = self.critic(np.concatenate([obss, actions]), batched=True)
        loss = dy.mean_batches(
            (values - dy.inputTensor(target_values, batched=True))**2)
        loss_value_critic = loss.npvalue()
        loss.backward()
        self.trainer_critic.update()

        # update actor
        dy.renew_cg()
        actions = self.actor(obss, batched=True)
        obs_and_actions = dy.concatenate(
            [dy.inputTensor(obss, batched=True), actions])
        loss = -dy.mean_batches(self.critic(obs_and_actions, batched=True))
        loss_value_actor = loss.npvalue()
        loss.backward()
        self.trainer_actor.update()

        self.noise_stddev = (
            self.noise_stddev - self.noise_stddev_decrease
        ) if self.noise_stddev > self.noise_stddev_lower else self.noise_stddev_lower

        self.actor_target.update(self.actor, soft=True)
        self.critic_target.update(self.critic, soft=True)

        return loss_value_actor + loss_value_critic
Code example #8
def test():
    lbls = []
    imgs = []
    for lbl, img in test_data:
        lbls.append(lbl)
        imgs.append(img)
    dy.renew_cg()
    losses = network.create_network_return_loss(imgs, lbls, dropout=False)
    loss = dy.mean_batches(losses)
    predicts = network.create_network_return_best(imgs, dropout=False)
    correct = np.sum(lbls == predicts[0])
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            loss.value(), correct, len(test_data),
            100. * correct / len(test_data)))
Code example #9
File: ops.py Project: aka-zyq/Event-Extraction-2
def mean(x, dim=None, include_batch_dim=False):
    if isinstance(x, list):
        return dy.average(x)
    head_shape, batch_size = x.dim()
    if dim is None:
        # warning: DyNet only implements mean_elems for tensors with 2 or fewer dims
        x = dy.mean_elems(x)
        if include_batch_dim and batch_size > 1:
            return dy.mean_batches(x)
        else:
            return x
    else:
        if dim == -1:
            dim = len(head_shape) - 1
        return dy.mean_dim(x, d=[dim], b=include_batch_dim)
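A hedged usage sketch for the mean() wrapper above; it assumes the function itself is already in scope (e.g. imported from this ops module), which is not shown here:

import numpy as np
import dynet as dy

dy.renew_cg()
x = dy.inputTensor(np.ones((4, 5, 2)), batched=True)       # head shape (4, 5), batch size 2
m_elems = mean(x)                                           # dy.mean_elems: per-batch-item mean of all elements
m_all = mean(x, include_batch_dim=True)                     # additionally averaged over the batch
m_dim0 = mean(x, dim=0)                                     # dy.mean_dim over the first dimension only
m_list = mean([dy.scalarInput(1.0), dy.scalarInput(3.0)])   # dy.average over a list of expressions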
Code example #10
File: train.py Project: bjayakumar/mead-baseline
    def _step(self,
              loader,
              update,
              log,
              reporting_fns,
              verbose=None,
              output=None,
              txts=None):
        steps = len(loader)
        pg = create_progress_bar(steps)
        cm = ConfusionMatrix(self.labels)
        epoch_loss = 0
        epoch_div = 0
        handle = None
        line_number = 0
        if output is not None and txts is not None:
            handle = open(output, "w")

        for batch_dict in pg(loader):
            dy.renew_cg()
            inputs = self.model.make_input(batch_dict)
            ys = inputs.pop('y')
            preds = self.model.forward(inputs)
            losses = self.model.loss(preds, ys)
            loss = dy.mean_batches(losses)
            batchsz = self._get_batchsz(batch_dict)
            lossv = loss.npvalue().item() * batchsz
            if handle is not None:
                for p, y in zip(preds, ys):
                    handle.write('{}\t{}\t{}\n'.format(
                        " ".join(txts[line_number]), self.model.labels[p],
                        self.model.labels[y]))
                    line_number += 1
            epoch_loss += lossv
            epoch_div += batchsz
            _add_to_cm(cm, ys, preds.npvalue())
            update(loss)
            log(self.optimizer.global_step, lossv, batchsz, reporting_fns)

        metrics = cm.get_all_metrics()
        metrics['avg_loss'] = epoch_loss / float(epoch_div)
        verbose_output(verbose, cm)
        if handle is not None:
            handle.close()
        return metrics
Code example #11
File: Seq2Seq.py Project: HsiaoYetGun/MiRLocator
	def decode_loss(self, encodings, trg):
		y, masksy = self.prepare_batch(trg, self.__trg_eos)
		slen, batch_size = y.shape
		self.__dec.init_params(encodings, batch_size, self.__train_flag)
		context = dy.zeros((self.__enc.output_dim, ), batch_size=batch_size)

		errs = []
		for cur_word, next_word, mask in zip(y, y[1:], masksy[1:]):
			hidden, embs, _ = self.__dec.next(cur_word, context, self.__train_flag)
			context, _ = self.attend(encodings, hidden)
			score = self.__dec.score(hidden, context, embs, self.__train_flag)
			masksy_embs = dy.inputTensor(mask, batched=True)

			loss = self.cross_entropy_loss(score, next_word, cur_word)
			loss = dy.cmult(loss, masksy_embs)
			errs.append(loss)

		error = dy.mean_batches(dy.esum(errs))
		return error
Code example #12
    def decode_loss(self, encodings, trg):
        """Compute the negative conditional log likelihood of the target sentence
        given the encoding of the source sentence

        :param encodings: Source sentence encodings obtained with self.encode
        :param trg: List of target sentences

        :returns: Expression of the loss averaged on the minibatch
        """
        y, masksy = self.prepare_batch(trg, self.trg_eos)
        slen, bsize = y.shape
        # Init decoder
        self.dec.init(encodings,
                      y,
                      self.usr.user_vector,
                      test=self.test,
                      update=self.update)
        # Initialize context
        context = dy.zeroes((self.enc.dim, ), batch_size=bsize)
        # Process user token if necessary
        if self.user_token:
            _, _, _ = self.dec.next(self.usr.user_vector,
                                    context,
                                    test=self.test)
        # Start decoding
        errs = []
        for cw, nw, mask in zip(y, y[1:], masksy[1:]):
            # Run LSTM
            h, e, _ = self.dec.next(cw, context, test=self.test)
            # Compute next context
            context, _ = self.attend(encodings, h)
            # Score
            s = self.dec.s(h, context, e, test=self.test)
            masksy_e = dy.inputTensor(mask, batched=True)
            # Loss
            loss = self.cross_entropy_loss(s, nw, cw)
            loss = dy.cmult(loss, masksy_e)
            errs.append(loss)
        # Add all losses together
        err = dy.mean_batches(dy.esum(errs))
        return err
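Both decode_loss examples above follow the same reduction pattern, sketched minimally here with toy shapes rather than the projects' real decoders: per-timestep batched losses are summed over time with dy.esum, and only then averaged over the minibatch with dy.mean_batches.

import numpy as np
import dynet as dy

dy.renew_cg()
# Three timesteps, batch of 4: one masked loss value per (timestep, sentence)
per_step = [dy.inputTensor(np.random.rand(4), batched=True) for _ in range(3)]
sentence_loss = dy.esum(per_step)             # still batched: total loss per sentence
batch_loss = dy.mean_batches(sentence_loss)   # scalar: average loss over the minibatch
print(batch_loss.value())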
Code example #13
 def calc_loss(self, rewards):
     loss = FactoredLossExpr()
     ## Z-Normalization
     if self.z_normalization:
         reward_batches = dy.concatenate_to_batch(rewards)
         mean_batches = dy.mean_batches(reward_batches)
         std_batches = dy.std_batches(reward_batches)
         rewards = [
             dy.cdiv(reward - mean_batches, std_batches)
             for reward in rewards
         ]
     ## Calculate baseline
     if self.baseline is not None:
         pred_reward, baseline_loss = self.calc_baseline_loss(rewards)
         loss.add_loss("rl_baseline", baseline_loss)
     ## Calculate Confidence Penalty
     if self.confidence_penalty:
         loss.add_loss("rl_confpen",
                       self.confidence_penalty.calc_loss(self.policy_lls))
     ## Calculate Reinforce Loss
     reinf_loss = []
     # Loop through all actions in one sequence
     for i, (policy,
             action_sample) in enumerate(zip(self.policy_lls,
                                             self.actions)):
         # Discount the reward if we use baseline
         if self.baseline is not None:
             rewards = [reward - pred_reward[i] for reward in rewards]
         # Main Reinforce calculation
         sample_loss = []
         for action, reward in zip(action_sample, rewards):
             ll = dy.pick_batch(policy, action)
             if self.valid_pos is not None:
                 ll = dy.pick_batch_elems(ll, self.valid_pos[i])
                 reward = dy.pick_batch_elems(reward, self.valid_pos[i])
             sample_loss.append(dy.sum_batches(ll * reward))
         # Take the average of the losses across multiple samples
         reinf_loss.append(dy.esum(sample_loss) / len(sample_loss))
     loss.add_loss("rl_reinf", self.weight * -dy.esum(reinf_loss))
     ## the composed losses
     return loss
Code example #14
def train(epoch):
    random.shuffle(train_data)
    i = 0
    epoch_start = time.time()
    while i < len(train_data):
        dy.renew_cg()
        lbls = []
        imgs = []
        for lbl, img in train_data[i:i + args.batch_size]:
            lbls.append(lbl)
            imgs.append(img)
        losses = network.create_network_return_loss(imgs, lbls, dropout=True)
        loss = dy.mean_batches(losses)
        if (int(i / args.batch_size)) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, i, len(train_data), 100. * i / len(train_data),
                loss.value()))
        loss.backward()
        trainer.update()
        i += args.batch_size
    epoch_end = time.time()
    print("{} s per epoch".format(epoch_end - epoch_start))
Code example #15
def train(epoch):
    random.shuffle(train_data)
    i = 0
    epoch_start = time.time()
    while i < len(train_data):
        dy.renew_cg()
        lbls = []
        imgs = []
        for lbl, img in train_data[i:i+args.batch_size]:
            lbls.append(lbl)
            imgs.append(img)
        losses = network.create_network_return_loss(imgs, lbls, dropout=True)
        loss = dy.mean_batches(losses)
        if (int(i/args.batch_size)) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, i, len(train_data),
                100. * i/len(train_data), loss.value()))
        loss.backward()
        trainer.update()
        i += args.batch_size
    epoch_end = time.time()
    print("{} s per epoch".format(epoch_end-epoch_start))
Code example #16
File: train.py Project: dpressel/baseline
 def _loss(outputs, labels):
     losses = [dy.pickneglogsoftmax_batch(out, label) for out, label in zip(outputs, labels)]
     loss = dy.mean_batches(dy.average(losses))
     return loss
Code example #17
    def _train(self, ts, **kwargs):
        self.model.train = True
        reporting_fns = kwargs.get('reporting_fns', [])
        epoch_loss = 0
        epoch_norm = 0
        auto_norm = 0
        metrics = {}
        steps = len(ts)
        last = steps
        losses = []
        i = 1
        pg = create_progress_bar(steps)
        dy.renew_cg()
        for batch_dict in pg(ts):

            inputs = self.model.make_input(batch_dict)
            y = inputs.pop('y')
            pred = self.model.compute_unaries(inputs)
            bsz = self._get_batchsz(y)
            if self.autobatchsz is None:
                losses = self.model.loss(pred, y)
                loss = dy.mean_batches(losses)
                lossv = loss.npvalue().item()
                report_loss = lossv * bsz
                epoch_loss += report_loss
                epoch_norm += bsz
                self.nstep_agg += report_loss
                self.nstep_div += bsz
                loss.backward()
                self.optimizer.update()
                dy.renew_cg()
                # TODO: Abstract this somewhat, or else once we have a batched tagger have 2 trainers
                if (self.optimizer.global_step + 1) % self.nsteps == 0:
                    metrics = self.calc_metrics(self.nstep_agg, self.nstep_div)
                    self.report(self.optimizer.global_step + 1, metrics,
                                self.nstep_start, 'Train', 'STEP',
                                reporting_fns, self.nsteps)
                    self.reset_nstep()
            else:
                loss = self.model.loss(pred, y)
                losses.append(loss)
                self.nstep_div += bsz
                epoch_norm += bsz
                auto_norm += bsz

                if i % self.autobatchsz == 0 or i == last:
                    loss = dy.average(losses)
                    lossv = loss.npvalue().item()
                    loss.backward()
                    self.optimizer.update()
                    report_loss = lossv * auto_norm
                    epoch_loss += report_loss
                    self.nstep_agg += report_loss
                    losses = []
                    dy.renew_cg()
                    if (self.optimizer.global_step + 1) % self.nsteps == 0:
                        metrics = self.calc_metrics(self.nstep_agg,
                                                    self.nstep_div)
                        self.report(self.optimizer.global_step + 1, metrics,
                                    self.nstep_start, 'Train', 'STEP',
                                    reporting_fns, self.nsteps)
                        self.reset_nstep()
                    auto_norm = 0
            i += 1

        metrics = self.calc_metrics(epoch_loss, epoch_norm)
        return metrics
Code example #18
 def predict_loss(self, encodings, usr):
     avg_enc = dy.mean_dim(encodings, 1)
     h = dy.rectify(dy.affine_transform([self.bh, self.Wh, avg_enc]))
     s = dy.affine_transform([self.bu, self.Wu, h])
     return dy.mean_batches(dy.squared_distance(s, self.usr_vec))
Code example #19
 def predict_loss(self, encodings, usr):
     avg_enc = dy.mean_dim(encodings, 1)
     h = dy.rectify(dy.affine_transform([self.bh, self.Wh, avg_enc]))
     s = dy.affine_transform([self.bu, self.Wu, h])
     return dy.mean_batches(dy.pickneglogsoftmax(s, usr))
Code example #20
def train(args, network, train_batches, dev_batches, log=None):
    """Estimate model parameters on `train_batches`
    with early stopping on `dev_batches`"""
    # Logger
    log = log or util.Logger(verbose=args.verbose, flush=True)
    # Optimizer
    trainer = dy.AdamTrainer(network.pc, alpha=args.lr)
    # Start training
    log("Starting training")
    best_accuracy = 0
    deadline = 0
    running_nll = n_processed = 0
    report_every = ceil(len(train_batches) / 10)
    # Start training
    for epoch in range(1, args.n_epochs + 1):
        # Time the epoch
        start_time = time.time()
        for batch, y in train_batches:
            # Renew the computation graph
            dy.renew_cg()
            # Initialize layers
            network.init(test=False, update=True)
            # Compute logits
            logits = network(batch)
            # Loss function
            nll = dy.mean_batches(dy.pickneglogsoftmax_batch(logits, y))
            # Backward pass
            nll.backward()
            # Update the parameters
            trainer.update()
            # Keep track of the nll
            running_nll += nll.value() * batch.batch_size
            n_processed += batch.batch_size
            # Print the current loss from time to time
            if train_batches.just_passed_multiple(report_every):
                avg_nll = running_nll / n_processed
                log(f"Epoch {epoch}@{train_batches.percentage_done():.0f}%: "
                    f"NLL={avg_nll:.3f}")
                running_nll = n_processed = 0
        # End of epoch logging
        avg_nll = running_nll / n_processed
        log(f"Epoch {epoch}@100%: NLL={avg_nll:.3f}")
        log(f"Took {time.time()-start_time:.1f}s")
        log("=" * 20)
        # Validate
        accuracy = evaluate(args, network, dev_batches)
        # Print final result
        log(f"Dev accuracy: {accuracy*100:.2f}%")
        # Early stopping
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            dynn.io.save(network.pc, args.model_file)
            deadline = 0
        else:
            if deadline < args.patience:
                dynn.io.populate(network.pc, args.model_file)
                trainer.learning_rate *= args.lr_decay
                deadline += 1
            else:
                log("Early stopping with best accuracy "
                    f"{best_accuracy*100:.2f}%")
                break
    # Load best model
    dynn.io.populate(network.pc, args.model_file)
    return best_accuracy
Code example #21
File: train.py Project: dpressel/baseline
    def _train(self, ts, **kwargs):
        self.model.train = True
        reporting_fns = kwargs.get('reporting_fns', [])
        epoch_loss = 0
        epoch_norm = 0
        auto_norm = 0
        metrics = {}
        steps = len(ts)
        last = steps
        losses = []
        i = 1
        pg = create_progress_bar(steps)
        dy.renew_cg()
        for batch_dict in pg(ts):

            inputs = self.model.make_input(batch_dict)
            y = inputs.pop('y')
            pred = self.model.compute_unaries(inputs)
            bsz = self._get_batchsz(y)
            if self.autobatchsz is None:
                losses = self.model.loss(pred, y)
                loss = dy.mean_batches(losses)
                lossv = loss.npvalue().item()
                report_loss = lossv * bsz
                epoch_loss += report_loss
                epoch_norm += bsz
                self.nstep_agg += report_loss
                self.nstep_div += bsz
                loss.backward()
                self.optimizer.update()
                dy.renew_cg()
                # TODO: Abstract this somewhat, or else once we have a batched tagger have 2 trainers
                if (self.optimizer.global_step + 1) % self.nsteps == 0:
                    metrics = self.calc_metrics(self.nstep_agg, self.nstep_div)
                    self.report(
                        self.optimizer.global_step + 1, metrics, self.nstep_start,
                        'Train', 'STEP', reporting_fns, self.nsteps
                    )
                    self.reset_nstep()
            else:
                loss = self.model.loss(pred, y)
                losses.append(loss)
                self.nstep_div += bsz
                epoch_norm += bsz
                auto_norm += bsz

                if i % self.autobatchsz == 0 or i == last:
                    loss = dy.average(losses)
                    lossv = loss.npvalue().item()
                    loss.backward()
                    self.optimizer.update()
                    report_loss = lossv * auto_norm
                    epoch_loss += report_loss
                    self.nstep_agg += report_loss
                    losses = []
                    dy.renew_cg()
                    if (self.optimizer.global_step + 1) % self.nsteps == 0:
                        metrics = self.calc_metrics(self.nstep_agg, self.nstep_div)
                        self.report(
                            self.optimizer.global_step + 1, metrics, self.nstep_start,
                            'Train', 'STEP', reporting_fns, self.nsteps
                        )
                        self.reset_nstep()
                    auto_norm = 0
            i += 1

        metrics = self.calc_metrics(epoch_loss, epoch_norm)
        return metrics