Example #1
    def forward(self, *args, **kwargs):
        if isinstance(self.label_key, int):
            if not (-len(args) <= self.label_key < len(args)):
                msg = 'Label key %d is out of bounds' % self.label_key
                raise ValueError(msg)
            t = args[self.label_key]
            if self.label_key == -1:
                args = args[:-1]
            else:
                args = args[:self.label_key] + args[self.label_key + 1:]
        elif isinstance(self.label_key, str):
            if self.label_key not in kwargs:
                msg = 'Label key "%s" is not found' % self.label_key
                raise ValueError(msg)
            t = kwargs[self.label_key]
            del kwargs[self.label_key]

        self.y = None
        self.loss = None
        self.accuracy = None
        self.y = self.predictor(*args, **kwargs)
        self.loss = self.lossfun(self.y, t)
        reporter.report({'loss': self.loss}, self)
        if self.compute_accuracy:
            self.accuracy = F.accuracy(self.y, t)
            # Per-class precision/recall/F1; index 1 selects the positive class.
            self.precision, self.recall, self.f1_score = F.classification_summary(
                self.y, t)[:3]
            reporter.report({'accuracy': self.accuracy}, self)
            reporter.report({'precision': self.precision[1]}, self)
            reporter.report({'recall': self.recall[1]}, self)
            reporter.report({'f1_score': self.f1_score[1]}, self)
        return self.loss
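For reference, F.classification_summary accepts plain arrays as well as Variables and returns a 4-tuple of per-class precision, recall, F-beta and support; a minimal toy sketch (data hypothetical):

import numpy as np
import chainer.functions as F

# Three samples, two classes; argmax predictions are [0, 1, 0].
y = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]], dtype=np.float32)
t = np.array([0, 1, 1], dtype=np.int32)
precision, recall, fbeta, support = F.classification_summary(y, t)
print(precision.array)  # per-class precision: [0.5, 1.0]
print(support.array)    # gold samples per class: [1, 2]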
Example #2
    def __call__(self, *args):
        loss = super().__call__(*args)
        t = args[self.label_key]

        summary = F.classification_summary(self.y, t, label_num=2)
        if chainer.config.user_gpu_mode:
            summary = [self.xp.asnumpy(v.array) for v in summary]
        else:
            summary = [v.array for v in summary]

        # Guard against NaN in the summary values
        y = self.y.array
        # precision: 0 when every sample is predicted as the negative class
        pre = summary[0][1] if (y[:, 1] > y[:, 0]).sum() >= 1 else 0
        # recall: 0 when the batch contains no positive examples at all
        rec = summary[1][1] if summary[3][1] >= 1 else 0
        # F: 0 when precision and recall are both 0
        F_measure = summary[2][1] if pre > 0 or rec > 0 else 0
        accuracy = F.accuracy(self.y, t)
        report({
            "pre": pre,
            "rec": rec,
            "F": F_measure,
            "accuracy": accuracy
        }, self)

        return loss
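The guards above exist because classification_summary yields NaN for undefined ratios; a minimal sketch (toy data) of the case where class 1 is never predicted:

import numpy as np
import chainer.functions as F

# Both rows favor class 0, so class 1 is never predicted and its
# precision is 0/0.
y = np.array([[0.9, 0.1], [0.8, 0.2]], dtype=np.float32)
t = np.array([0, 1], dtype=np.int32)
precision = F.classification_summary(y, t, label_num=2)[0]
print(precision.array)  # class 1 precision is NaN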
Example #3
    def __call__(self, x, t):
        y = self.predictor(x)
        if self.lastlayer == 1:  # The number of last layer units = 1
            loss = F.sigmoid_cross_entropy(y, t.reshape(len(t), 1))
            accuracy = F.binary_accuracy(y, t.reshape(len(t), 1))
        else:  # The number of last layer units = 2
            loss = F.softmax_cross_entropy(y, t)
            accuracy = F.accuracy(y, t)
        summary = F.classification_summary(y, t, beta=1.0)
        precision = summary[0]
        recall = summary[1]
        f_value = summary[2]
        # Scratch reporter: collect f_value into a local observation dict
        # via Reporter.scope; the dict is not used further here.
        reporter = Reporter()
        observer = object()
        reporter.add_observer('f_value', observer)
        observation = {}
        with reporter.scope(observation):
            reporter.report({'x': f_value}, observer)
        report(
            {
                'loss': loss,
                'accuracy': accuracy,
                'precision': precision,
                'recall': recall,
                'f_value': f_value
            }, self)
        report(dict(('f_value_%d' % i, val) for i, val in enumerate(f_value)),
               self)

        return loss
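The throwaway Reporter in Example #3 uses Chainer's scoped-reporting mechanism; a minimal standalone sketch of how Reporter.scope fills an observation dict:

from chainer import Reporter

reporter = Reporter()
observer = object()
reporter.add_observer('stats', observer)
observation = {}
with reporter.scope(observation):
    reporter.report({'x': 1.0}, observer)
print(observation)  # {'stats/x': 1.0}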
Example #4
    def __call__(self, *args):
        """Computes the loss value for an input and label pair.

        It also computes accuracy and stores it to the attribute.

        Args:
            args (list of ~chainer.Variable): Input minibatch.

        All elements of ``args`` but the last one are features, and
        the last element corresponds to the ground-truth labels.
        The features are fed to the predictor and the result is
        compared with the ground-truth labels.

        Returns:
            ~chainer.Variable: Loss value.

        """

        assert len(args) >= 2
        x = args[:-1]
        t = args[-1]
        self.y = None
        self.loss = None
        self.accuracy = None
        self.y = self.predictor(*x)
        self.loss = self.lossfun(self.y, t)

        summary = F.classification_summary(self.y, t, beta=1.0)
        precision = summary[0]
        recall = summary[1]
        f_value = summary[2]

        reporter.report({'loss': self.loss}, self)
        reporter.report(
            dict(('precision_%d' % i, val) for i, val in enumerate(precision)),
            self)
        reporter.report(
            dict(('recall_%d' % i, val) for i, val in enumerate(recall)), self)
        reporter.report(
            dict(('f_value_%d' % i, val) for i, val in enumerate(f_value)),
            self)
        if self.compute_accuracy:
            self.accuracy = self.accfun(self.y, t)
            reporter.report({'accuracy': self.accuracy}, self)
        return self.loss
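A hedged usage sketch for the wrapper above (the model instance and shapes are hypothetical): the last positional argument is the label batch, and everything before it is fed to the predictor.

import numpy as np

# model is assumed to be an instance of the classifier above wrapping a
# 10-class predictor; loss and per-class P/R/F are reported on each call.
x = np.random.rand(8, 784).astype(np.float32)
t = np.random.randint(0, 10, size=8).astype(np.int32)
loss = model(x, t)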
Example #5
    def __call__(self, x, t, train=False):
        y = self.predictor(x)

        if self.lastlayer == 1:  # The number of last layer units = 1
            loss = F.sigmoid_cross_entropy(y, t.reshape(len(t), 1))
            accuracy = F.binary_accuracy(y, t.reshape(len(t), 1)).data
            f1 = F.f1_score(y, t)[0].data
        else:  # The number of last layer units = 2
            loss = F.softmax_cross_entropy(y, t)
            accuracy = F.accuracy(y, t).data
            f1 = F.f1_score(y, t)[0].data
        # Per-class precision/recall/F-value (computed but not returned).
        summary = F.classification_summary(y, t, beta=1.0)
        precision = summary[0]
        recall = summary[1]
        f_value = summary[2]

        return accuracy.min(), f1[0]
Example #6
    def check_forward(self, xp):
        y = chainer.Variable(xp.asarray(self.y))
        t = chainer.Variable(xp.asarray(self.t))
        p_actual, r_actual, fbeta_actual, s_actual = F.classification_summary(
            y, t, self.label_num, self.beta, self.ignore_label)

        pred = self.y.argmax(axis=1).reshape(self.t.shape)
        p_expect = precision(pred, self.t, self.dtype, 3, self.ignore_label)
        r_expect = recall(pred, self.t, self.dtype, 3, self.ignore_label)
        fbeta_expect = fbeta_score(p_expect, r_expect, self.beta)
        s_expect = support(self.t, self.dtype, 3, self.ignore_label)
        chainer.testing.assert_allclose(p_actual.data, p_expect,
                                        **self.check_forward_options)
        chainer.testing.assert_allclose(r_actual.data, r_expect,
                                        **self.check_forward_options)
        chainer.testing.assert_allclose(fbeta_actual.data, fbeta_expect,
                                        **self.check_forward_options)
        chainer.testing.assert_allclose(s_actual.data, s_expect,
                                        **self.check_forward_options)
Example #7
    def check_forward(self, xp):
        y = chainer.Variable(xp.asarray(self.y))
        t = chainer.Variable(xp.asarray(self.t))
        p_actual, r_actual, fbeta_actual, s_actual = F.classification_summary(
            y, t, self.label_num, self.beta, self.ignore_label)

        pred = self.y.argmax(axis=1).reshape(self.t.shape)
        p_expect = precision(pred, self.t, self.dtype,
                             3, self.ignore_label)
        r_expect = recall(pred, self.t, self.dtype,
                          3, self.ignore_label)
        fbeta_expect = fbeta_score(p_expect, r_expect, self.beta)
        s_expect = support(self.t, self.dtype,
                           3, self.ignore_label)
        chainer.testing.assert_allclose(p_actual.data, p_expect,
                                        **self.check_forward_options)
        chainer.testing.assert_allclose(r_actual.data, r_expect,
                                        **self.check_forward_options)
        chainer.testing.assert_allclose(fbeta_actual.data, fbeta_expect,
                                        **self.check_forward_options)
        chainer.testing.assert_allclose(s_actual.data, s_expect,
                                        **self.check_forward_options)
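The reference helpers precision, recall, fbeta_score and support used by the two tests above are not shown; here is a hedged NumPy sketch of what they compute (per-class metrics with ignore_label positions masked out), matching the standard definitions rather than the exact test module:

import numpy as np

def support(t, dtype, label_num, ignore_label):
    # Number of gold samples per class, ignoring masked positions.
    valid = t != ignore_label
    return np.bincount(t[valid], minlength=label_num).astype(dtype)

def precision(pred, t, dtype, label_num, ignore_label):
    # tp / predicted-positives per class; NaN where a class is never predicted.
    valid = t != ignore_label
    tp = np.bincount(pred[valid & (pred == t)], minlength=label_num)
    predicted = np.bincount(pred[valid], minlength=label_num)
    with np.errstate(divide='ignore', invalid='ignore'):
        return (tp / predicted).astype(dtype)

def recall(pred, t, dtype, label_num, ignore_label):
    # tp / gold-positives per class; NaN where a class has no gold samples.
    valid = t != ignore_label
    tp = np.bincount(t[valid & (pred == t)], minlength=label_num)
    gold = np.bincount(t[valid], minlength=label_num)
    with np.errstate(divide='ignore', invalid='ignore'):
        return (tp / gold).astype(dtype)

def fbeta_score(p, r, beta):
    # Weighted harmonic mean of precision and recall.
    b2 = beta * beta
    with np.errstate(divide='ignore', invalid='ignore'):
        return (1 + b2) * p * r / (b2 * p + r)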
Example #8
def main():
    parser = argparse.ArgumentParser(description='chainer formulanet test')

    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=32,
                        help='Number of examples in each mini-batch')
    parser.add_argument('--device',
                        type=str,
                        default="-1",
                        help='Device specifier. Either ChainerX device '
                        'specifier or an integer. If non-negative integer, '
                        'CuPy arrays with specified device id are used. If '
                        'negative integer, NumPy arrays are used')
    parser.add_argument('--dataset', '-i', default="holstep", help='HDF5 file')
    parser.add_argument('--out', '-o', help='output CSV file')
    parser.add_argument('--model', '-m', default='', help='Saved model file')
    parser.add_argument('--conditional',
                        action='store_true',
                        help='Use conditional model')
    parser.add_argument('--preserve-order',
                        action='store_true',
                        help='Use order-preserving model')
    parser.add_argument('--steps',
                        type=int,
                        default="3",
                        help='Number of update steps')

    args = parser.parse_args()

    device = chainer.get_device(args.device)

    print('# Device: {}'.format(str(device)))
    print('# conditional: {}'.format(args.conditional))
    print('# order_preserving: {}'.format(args.preserve_order))
    print('# steps: {}'.format(args.steps))
    print('')

    test_h5f = h5py.File(args.dataset, 'r')
    test = formulanet.Dataset(symbols.symbols, test_h5f)
    test_iter = iterators.SerialIterator(test,
                                         args.batchsize,
                                         repeat=False,
                                         shuffle=False)
    print(len(test))

    model = formulanet.FormulaNet(vocab_size=len(symbols.symbols),
                                  steps=args.steps,
                                  order_preserving=args.preserve_order,
                                  conditional=args.conditional)
    chainer.serializers.load_npz(args.model, model)
    model.to_device(device)

    with chainer.using_config('train', False):
        with chainer.using_config('enable_backprop', False):
            expected = []
            logits = []

            with tqdm(total=len(test)) as pbar:
                for batch in test_iter:
                    gs, tuples = formulanet.convert(batch, device)
                    logits1, _loss = model._forward(gs, tuples)
                    logits.append(chainer.backends.cuda.to_cpu(logits1.array))
                    expected.extend(1 if y else 0
                                    for (conj, stmt, y) in tuples)
                    pbar.update(len(batch))

            logits = np.concatenate(logits)
            expected = np.array(expected, dtype=np.int32)

            df = pd.DataFrame({
                "logits_false": logits[:, 0],
                "logits_true": logits[:, 1],
                "expected": expected
            })
            df.to_csv(args.out, index=False)

            accuracy = F.accuracy(logits, expected).array
            precision, recall, F_beta_score, support = F.classification_summary(
                logits, expected)
            print("accuracy: {}".format(accuracy))
            print("precision: {}".format(precision.array[1]))
            print("recall: {}".format(recall.array[1]))
            print("F beta score: {}".format(F_beta_score.array[1]))
            print("support: {}".format(support.array))
Example #9
def main():
    schema = 'filename\tTop1\tTop5\tPrecision\tRecall\tF-score'
    parser = argparse.ArgumentParser(
        description='Target Model Tester \n ({})'.format(schema))
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/base.yml',
                        help='path to config file')
    parser.add_argument('--results_dir',
                        type=str,
                        default='./result/',
                        help='directory to save the results to')
    parser.add_argument('--batchsize',
                        type=int,
                        default=128,
                        help='Batchsize for testing')
    parser.add_argument('--process_num', type=int, default=0)
    parser.add_argument('--seed', type=int, default=42)

    args = parser.parse_args()
    config = yaml_utils.Config(
        yaml.load(open(args.config_path), Loader=yaml.SafeLoader))
    pattern = "-".join([
        config.pattern, config.models['classifier']['name'],
        config.dataset['dataset_name']
    ])
    out_path = args.results_dir + '/' + pattern

    # Model
    model_path = out_path + '/classifier{}.npz'.format(args.process_num)
    model = load_pretrained_models(config.models['classifier'], model_path)

    # Dataset
    test_dataset = yaml_utils.load_dataset(config, test=True)
    test_itr = chainer.iterators.SerialIterator(test_dataset,
                                                args.batchsize,
                                                repeat=False)

    chainer.cuda.get_device_from_id(0).use()
    model.to_gpu()  # Copy the model to the GPU

    xp = model.xp

    pred_labels = []
    correct_labels = []
    count = 0
    with chainer.using_config('train', False):
        for batch in test_itr:
            batchsize = len(batch)
            images = [batch[i][0] for i in range(batchsize)]
            labels = [batch[i][1] for i in range(batchsize)]
            x = xp.array(images)
            result = model(x).data
            pred_labels.append(chainer.cuda.to_cpu(result))
            correct_labels.append(np.array(labels))
            count += 1

    pred_labels = np.concatenate(pred_labels)
    correct_labels = np.concatenate(correct_labels)
    top1 = F.mean(F.accuracy(pred_labels, correct_labels)).data
    top5 = calc_top5_acc(pred_labels, correct_labels)
    precision, recall, Fscore, _ = F.classification_summary(
        pred_labels, correct_labels)
    out_results = {
        'test_{}'.format(args.process_num): {
            'accuracy': float(top1),
            'top-5 accuracy': float(top5),
            'precision': float(F.mean(precision).data),
            'recall': float(F.mean(recall).data),
            'f-score': float(F.mean(Fscore).data)
        }
    }

    result_path = out_path + '/test_result.yaml'
    if os.path.exists(result_path):
        result_yaml = yaml.load(open(result_path, 'r+'),
                                Loader=yaml.SafeLoader)
    else:
        result_yaml = {}
    result_yaml.update(out_results)
    with open(result_path, mode='w') as f:
        f.write(yaml.dump(result_yaml, default_flow_style=False))

    print('{}\t{}\t{}\t{}\t{}\t{}'.format(pattern, top1, top5,
                                          F.mean(precision).data,
                                          F.mean(recall).data,
                                          F.mean(Fscore).data))
    return out_results
Example #10
    def forward(self):
        # y and t are assumed to be defined in the enclosing test scope.
        return F.classification_summary(
            y, t, self.label_num, self.beta, self.ignore_label)
Example #11
def main(args):
    # Read the trained model's parameters from the JSON file
    n_out, n_unit, actfun = GET.jsonData(args.param,
                                         ['n_out', 'n_unit', 'actfun'])
    # Build the model
    model = L.Classifier(
        CNT(n_out, n_unit, GET.actfun(actfun), base=L.ResNet50Layers(None)))
    # Resolve the path for load_npz and load the trained model
    load_path = FNC.checkModelType(args.model)
    try:
        chainer.serializers.load_npz(args.model, model, path=load_path)
    except Exception:
        import traceback
        traceback.print_exc()
        print(FNC.fileFuncLine())
        exit()

    # GPU setup
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
        xp = cupy
    else:
        xp = np
        # model.to_intel64()

    # Generate the images
    x = []
    t = []
    for i in range(n_out):
        x.extend(
            create(args.other_path, args.human_path, args.background_path,
                   args.obj_size, args.img_size, args.obj_num, i,
                   args.img_num))
        t.extend([i] * args.img_num)

    x = imgs2resnet(np.array(x), xp)
    t = xp.array(t, dtype=np.int8)
    print(x.shape, t.shape)

    # Run the model
    with chainer.using_config('train', False):
        st = time.time()
        y = model.predictor(x)
        print('exec time: {0:.2f}[s]'.format(time.time() - st))

    # Evaluate precision, recall, and the F-score:
    # precision: of the predictions of a given count, the fraction that
    #            were actually correct; answering a different count
    #            (when the truth is one person) lowers it
    # recall:    of the true instances of a given count, the fraction the
    #            model answered correctly; answering "one person" when the
    #            truth is not one lowers it
    # F score:   2/((1/recall)+(1/precision))
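    # Worked example (values hypothetical): with precision = 0.8 and
    # recall = 0.5, F = 2/((1/0.5)+(1/0.8)) = 0.8/1.3 ≈ 0.615.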
    print('t:', t)
    print('y:', y.data.argmax(axis=1))
    p, r, f, _ = F.classification_summary(y, t)
    precision = p.data.tolist()
    recall = r.data.tolist()
    F_score = f.data.tolist()
    print('num|precision|recall|F')
    for i, (p, r, f) in enumerate(zip(precision, recall, F_score)):
        print('{0:3}|    {1:4.3f}| {2:4.3f}| {3:4.3f}'.format(i, p, r, f))
Example #12
    def forward(self):
        # y and t are assumed to be defined in the enclosing test scope.
        return F.classification_summary(
            y, t, self.label_num, self.beta, self.ignore_label)
Example #13
def classification_summary_flatten(y, t):
    # Flatten the labels so they align with the 2-D score matrix ``y``.
    t = t.flatten()
    summary = chaFunc.classification_summary(y, t)
    return summary
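A hedged usage sketch (shapes hypothetical): t arrives sequence-shaped, e.g. (batch, seq_len), while y is already flattened over tokens:

import numpy as np

# y: per-token class scores, shape (batch * seq_len, n_classes);
# t: labels of shape (batch, seq_len), flattened inside the helper.
y = np.random.rand(6, 3).astype(np.float32)
t = np.array([[0, 1, 2], [2, 1, 0]], dtype=np.int32)
precision, recall, fbeta, support = classification_summary_flatten(y, t)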
Example #14
    def run(self):

        train_losses, valid_losses, train_accs, valid_accs = [], [], [], []
        train_precision_scores, valid_precision_scores = [], []
        train_recall_scores, valid_recall_scores = [], []
        train_f1_scores, valid_f1_scores = [], []
        for j in range(self.n_out):
            train_precision_scores.append([])
            train_recall_scores.append([])
            train_f1_scores.append([])
            valid_precision_scores.append([])
            valid_recall_scores.append([])
            valid_f1_scores.append([])
        sum_train_loss, sum_train_accuracy = 0, 0
        all_train_t = cupy.empty((0,), cupy.int32)
        all_valid_t = cupy.empty((0,), cupy.int32)
        all_train_y = cupy.empty((0, args.n_out), cupy.float32)
        all_valid_y = cupy.empty((0, args.n_out), cupy.float32)
        best_valid_loss = np.inf
        best_valid_acc = np.inf

        # early-stopping counter
        patience_counter = 0

        while self.train_iter.epoch < self.epoch:

            # train phase
            batch = self.train_iter.next()
            if self.flag_train:

                # step by step update
                x_array, t_array = convert.concat_examples(batch, self.gpu)
                all_train_t = cupy.hstack([all_train_t, t_array])

                x, t = chainer.Variable(x_array), chainer.Variable(t_array)
                x = cupy_augmentation(x)  # added 2019/09/10

                self.model.cleargrads()
                y, loss, accuracy, _ = self.model(x, t)
                all_train_y = cupy.vstack([all_train_y, y.data])

                loss.backward()
                self.optimizer.update()

                sum_train_loss += float(loss.data) * len(t.data)
                sum_train_accuracy += float(accuracy.data) * len(t.data)

            # valid phase
            if self.train_iter.is_new_epoch:

                # Return objects Loss
                mean_train_loss = sum_train_loss / self.N_train
                train_losses.append(mean_train_loss)

                # Return objects Acc
                mean_train_acc = sum_train_accuracy / self.N_train
                train_accs.append(mean_train_acc)

                # Return objects: per-class precision / recall / F1
                summary = F.classification_summary(all_train_y, all_train_t)
                for j in range(self.n_out):
                    train_precision_scores[j].append(summary[0][j])
                    train_recall_scores[j].append(summary[1][j])
                    train_f1_score = summary[2][j]
                    train_f1_scores[j].append(train_f1_score)

                sum_valid_accuracy, sum_valid_loss = 0, 0
                all_train_t = cupy.empty((0,), cupy.int32)
                all_valid_t = cupy.empty((0,), cupy.int32)
                all_train_y = cupy.empty((0, args.n_out), cupy.float32)
                all_valid_y = cupy.empty((0, args.n_out), cupy.float32)

                for batch in self.valid_iter:
                    x_array, t_array = convert.concat_examples(batch, self.gpu)
                    all_valid_t = cupy.hstack([all_valid_t, t_array])
                    x, t = chainer.Variable(x_array), chainer.Variable(t_array)

                    with chainer.using_config(
                            'train', False), chainer.no_backprop_mode():
                        y, loss, accuracy, f1_score = self.model(x, t)

                    sum_valid_loss += float(loss.data) * len(t.data)
                    sum_valid_accuracy += float(accuracy.data) * len(t.data)
                    all_valid_y = cupy.vstack([all_valid_y, y.data])

                # Return objects Loss
                mean_valid_loss = sum_valid_loss / self.N_valid
                valid_losses.append(mean_valid_loss)

                # Return objects valid
                mean_valid_acc = sum_valid_accuracy / self.N_valid
                valid_accs.append(mean_valid_acc)

                # Return objects: per-class precision / recall / F1
                summary = F.classification_summary(all_valid_y, all_valid_t)
                for j in range(self.n_out):
                    valid_precision_scores[j].append(summary[0][j])
                    valid_recall_scores[j].append(summary[1][j])
                    valid_f1_score = summary[2][j]
                    valid_f1_scores[j].append(valid_f1_score)

                self.valid_iter.reset()

                if mean_valid_loss < best_valid_loss:
                    # update best
                    best_valid_loss = mean_valid_loss
                    best_valid_acc = mean_valid_acc
                    print(
                        "e %d/%d, train_loss %f, valid_loss(Best) %f, train_accuracy %f, valid_accuracy %f ,train_f1 %f , valid_f1 %f"
                        % (self.train_iter.epoch, args.epoch, mean_train_loss,
                           best_valid_loss, mean_train_acc, mean_valid_acc,
                           train_f1_score.data, valid_f1_score.data))
                    save_flag = 1
                    patience_counter = 0  # important: reset the patience counter

                else:
                    patience_counter += 1
                    print('patience counter incremented to ' +
                          str(patience_counter))
                    save_flag = 0
                    print(
                        "e %d/%d, train_loss %f, valid_loss %f, train_accuracy %f, valid_accuracy %f ,train_f1 %f, valid_f1 %f"
                        % (self.train_iter.epoch, args.epoch, mean_train_loss,
                           mean_valid_loss, mean_train_acc, mean_valid_acc,
                           train_f1_score.data, valid_f1_score.data))

                sum_train_loss, sum_train_accuracy = 0, 0
                all_train_t = cupy.empty((0,), cupy.int32)
                all_valid_t = cupy.empty((0,), cupy.int32)
                all_train_y = cupy.empty((0, args.n_out), cupy.float32)
                all_valid_y = cupy.empty((0, args.n_out), cupy.float32)

                if self.save_interval > 0:
                    if (self.train_iter.epoch % self.save_interval == 0
                            or self.train_iter.epoch == self.epoch
                            or save_flag == 1):
                        try:
                            chainer.serializers.save_npz(
                                save_dir + '/' + self.prefix + "_e" +
                                str(self.train_iter.epoch) + '.model',
                                self.model)
                            chainer.serializers.save_npz(
                                save_dir + '/' + self.prefix + "_e" +
                                str(self.train_iter.epoch) + '.state',
                                self.optimizer)
                            print('Successfully saved model')
                        except Exception:
                            print('WARN: saving the model failed; ignored')

            # early stopping
            if patience_counter >= patience_limit:
                break
        return (train_losses, valid_losses, train_accs, valid_accs,
                best_valid_loss, train_f1_scores, valid_f1_scores,
                train_precision_scores, valid_precision_scores,
                train_recall_scores, valid_recall_scores, best_valid_acc)
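For callers, a hedged unpacking sketch of the 12-element return value (the trainer name is hypothetical):

(train_losses, valid_losses, train_accs, valid_accs, best_valid_loss,
 train_f1_scores, valid_f1_scores, train_precision_scores,
 valid_precision_scores, train_recall_scores, valid_recall_scores,
 best_valid_acc) = trainer.run()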
Example #15
    def __call__(self, *args, **kwargs):

        if (isinstance(self.label_link_key, str)
                and isinstance(self.label_type_key, str)):
            if self.label_link_key not in kwargs:
                msg = 'Label key "%s" is not found' % self.label_link_key
                raise ValueError(msg)
            if self.label_type_key not in kwargs:
                msg = 'Label key "%s" is not found' % self.label_type_key
                raise ValueError(msg)
            t_link = kwargs[self.label_link_key]
            t_type = kwargs[self.label_type_key]

            # flatten and remove label -1
            t_type = t_type[t_type > -1]

            t_link_type = kwargs[self.label_link_type_key]

            # flatten and remove label -1
            t_link_type = t_link_type[t_link_type > -1]

        self.y = None
        self.loss = 0
        self.accuracy = None
        self.y = self.predictor(*args, **kwargs)

        # y_link: shape (batchsize * N_max_spans, 13)
        self.y_link = self.y[0]
        y_link_mst = decode_mst(self.y_link, t_link, self.max_n_spans)

        # y_type = (n_spans, 3)
        self.y_type = self.y[1]
        self.y_link_type = self.y[2]

        self.loss_link = self.lossfun(self.y_link, t_link)
        reporter.report({'loss_link': self.loss_link}, self)
        self.loss += (1 - self.ac_type_alpha -
                      self.link_type_alpha)*self.loss_link

        assert self.ac_type_alpha + self.link_type_alpha <= 1

        if self.ac_type_alpha:
            self.loss_type = chaFunc.softmax_cross_entropy(self.y_type, t_type)
            reporter.report({'loss_ac_type': self.loss_type}, self)
            self.loss += self.ac_type_alpha*self.loss_type

        if self.link_type_alpha:
            self.loss_link_type = chaFunc.softmax_cross_entropy(self.y_link_type,
                                                                t_link_type,
                                                                ignore_label=2)
            reporter.report({'loss_link_type': self.loss_link_type}, self)
            self.loss += self.link_type_alpha*self.loss_link_type

        reporter.report({'loss': self.loss}, self)

        macro_f_scores = []

        if self.compute_accuracy:

            ###########################
            # link prediction results #
            ###########################
            self.accuracy_link = self.accfun(
                y_link_mst, chainer.cuda.to_cpu(t_link))
            reporter.report({'accuracy_link': self.accuracy_link}, self)

            if self.fscore_link_fun:
                f_binary = self.fscore_link_fun(y_link_mst,
                                                chainer.cuda.to_cpu(
                                                    t_link),
                                                self.max_n_spans)
                f_link = f_binary[0]
                f_nolink = f_binary[1]
                macro_f_link = (f_link+f_nolink)/2

                if not math.isnan(macro_f_link):
                    macro_f_scores.append(macro_f_link)

                reporter.report({'f_link': f_link}, self)  # f score
                reporter.report({'f_nolink': f_nolink}, self)  # f score
                reporter.report(
                    {'macro_f_link': macro_f_link}, self)  # f score

            ##############################
            # ac_type prediction results #
            ##############################
            self.accuracy_type = chaFunc.accuracy(self.y_type, t_type)
            reporter.report({'accuracy_type': self.accuracy_type}, self)

            if self.settings.dataset == "PE":
                self.summary_type = chaFunc.classification_summary(self.y_type,
                                                                   t_type,
                                                                   label_num=3)
                f_type = self.summary_type[2]
                support_type = self.summary_type[3]
                f_premise = f_type[0]
                f_claim = f_type[1]
                f_majorclaim = f_type[2]
                macro_f_type = sum(f_type)/len(f_type)
            else:
                self.summary_type = chaFunc.classification_summary(self.y_type,
                                                                   t_type,
                                                                   label_num=2)
                f_type = self.summary_type[2]
                support_type = self.summary_type[3]
                f_premise = f_type[0]
                f_claim = f_type[1]
                f_majorclaim = 0
                macro_f_type = sum(f_type)/len(f_type)

            if self.ac_type_alpha:
                if math.isnan(macro_f_type.data):
                    macro_f_scores.append(0)
                else:
                    macro_f_scores.append(macro_f_type)

            reporter.report({'f_premise': f_premise}, self)
            reporter.report({'f_claim': f_claim}, self)
            reporter.report({'f_majorclaim': f_majorclaim}, self)
            reporter.report({'macro_f_type': sum(f_type)/len(f_type)}, self)
            reporter.report({'ac_type_predicted_class_{}'.format(i):
                             self.count_prediction(self.y_type, i)
                             for i, val in enumerate(support_type)}, self)
            reporter.report({'ac_type_gold_class_{}'.format(i): val
                             for i, val in enumerate(support_type)}, self)

            ################################
            # link type prediction results #
            ################################
            self.accuracy_type = chaFunc.accuracy(self.y_link_type,
                                                  t_link_type,
                                                  ignore_label=2)
            reporter.report({'accuracy_link_type': self.accuracy_type}, self)

            self.summary_type = chaFunc.classification_summary(self.y_link_type,
                                                               t_link_type,
                                                               label_num=2,
                                                               ignore_label=2)
            f_type = self.summary_type[2]
            support_type = self.summary_type[3]
            f_support = f_type[0]
            f_attack = f_type[1]
            macro_f_type = sum(f_type)/len(f_type)

            if self.link_type_alpha:
                if math.isnan(macro_f_type.data):
                    macro_f_scores.append(0)
                else:
                    macro_f_scores.append(macro_f_type)

            reporter.report({'f_support': f_support}, self)
            reporter.report({'f_attack': f_attack}, self)
            reporter.report(
                {'macro_f_link_type': sum(f_type)/len(f_type)}, self)
            reporter.report({'link_type_predicted_class_{}'.format(i):
                             self.count_prediction(self.y_link_type, i)
                             for i, val in enumerate(support_type)}, self)
            reporter.report({'link_type_gold_class_{}'.format(i): val
                             for i, val in enumerate(support_type)}, self)

            reporter.report({'total_macro_f': sum(
                macro_f_scores)/len(macro_f_scores)}, self)
        return self.loss
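count_prediction is called above but not defined in this snippet; a hedged sketch consistent with how it is used (counting samples whose argmax prediction is class i):

    def count_prediction(self, y, i):
        # Hypothetical helper: number of samples predicted as class ``i``.
        pred = chainer.cuda.to_cpu(y.array).argmax(axis=1)
        return int((pred == i).sum())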