Code Example #1
    def validation_epoch_end(self, outputs):
        d = dict()

        # Validation loss
        d["v_loss"] = torch.stack([o["loss"] for o in outputs]).mean()
        if self.out_ch > 5 and not self.softmax:
            v_loss_5 = torch.stack([o["loss_5"] for o in outputs]).mean()
            d["v_loss_5"] = v_loss_5

        # Accuracy and Kappa score
        all_preds = torch.cat([o["pred"] for o in outputs]).cpu().numpy()
        all_targets = torch.cat([o["label"] for o in outputs]).cpu().numpy()

        # Overall score
        d["v_acc"] = (all_preds == all_targets).mean() * 100.0
        d["v_kappa"] = quadratic_weighted_kappa(all_targets, all_preds)

        # Score by data provider
        is_karolinska = self.data_provider[: len(all_preds)]
        p_k = all_preds[is_karolinska == 1]
        t_k = all_targets[is_karolinska == 1]
        p_r = all_preds[is_karolinska == 0]
        t_r = all_targets[is_karolinska == 0]
        d["v_kappa_k"] = quadratic_weighted_kappa(t_k, p_k)
        d["v_kappa_r"] = quadratic_weighted_kappa(t_r, p_r)

        ret_dict = {"progress_bar": d, "log": d.copy(), "val_loss": d["v_loss"]}
        return ret_dict
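
Every example on this page calls a `quadratic_weighted_kappa` helper that none of the snippets define. Where the arguments are two label vectors (examples #1, #3, #4, #5, #8, #10, #11, #13), it can be a thin wrapper around scikit-learn's `cohen_kappa_score`, which example #8 also calls directly with `weights='quadratic'`. A minimal sketch, assuming integer class labels; the projects' actual implementations may differ:

from sklearn.metrics import cohen_kappa_score

# Sketch of the label-vector variant; an assumption, not these projects' code.
def quadratic_weighted_kappa(y_true, y_pred):
    return cohen_kappa_score(y_true, y_pred, weights='quadratic')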
Code Example #2
def train(epoch, dataLoader):
    net.train()
    Acc = AverageMeter()
    preds_list = np.zeros(shape=(0, args.num_classes))
    label_list = []
    for batch_index, (images, labels) in enumerate(dataLoader):
        if epoch <= args.warm:
            warmup_scheduler.step()
        images = Variable(images)
        labels = Variable(labels)
        label_list.extend(labels)
        labels = labels.cuda()
        images = images.cuda()
        optimizer.zero_grad()
        outputs = net(images)
        loss = loss_function(outputs, labels)
        loss.backward()
        optimizer.step()

        _, preds = outputs.max(1)
        preds_list = np.concatenate((preds_list, outputs.data.cpu().numpy()))
        correct = preds.eq(labels).sum()
        Acc.update(correct.item(), labels.shape[0])
        n_iter = (epoch - 1) * len(dataLoader) + batch_index + 1

        last_layer = list(net.children())[-1]
        for name, para in last_layer.named_parameters():
            if 'weight' in name:
                writer.add_scalar('LastLayerGradients/grad_norm2_weights',
                                  para.grad.norm(), n_iter)
            if 'bias' in name:
                writer.add_scalar('LastLayerGradients/grad_norm2_bias',
                                  para.grad.norm(), n_iter)
        kappa = quadratic_weighted_kappa(outputs.detach().cpu().numpy(), labels.cpu().numpy())
        print(
            'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR: {:0.8f}\tAcc: {acc:.4f}\tKappa: {kappa:.4f}'
            .format(loss.item(),
                    optimizer.param_groups[0]['lr'],
                    epoch=epoch,
                    trained_samples=batch_index * args.b + len(images),
                    total_samples=len(dataLoader.dataset),
                    kappa=kappa,
                    acc=correct.item() / labels.shape[0]))

        # update training loss for each iteration
        writer.add_scalar('Train/loss', loss.item(), n_iter)

    all_kappa = quadratic_weighted_kappa(preds_list, label_list)
    all_acc = Acc.avg
    print("all_kappa:", all_kappa, "all_acc:", all_acc)
    for name, param in net.named_parameters():
        layer, attr = os.path.splitext(name)
        attr = attr[1:]
        writer.add_histogram("{}/{}".format(layer, attr), param, epoch)
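
Several examples (#2, #4, #5, #8, #11, #13) also rely on an `AverageMeter` that is not shown. The sketch below mirrors the widely copied class from the official PyTorch ImageNet example and matches how it is called here (`update(value, n)`, then `.val` and `.avg`); note that example #2 passes a raw count of correct predictions rather than a per-sample rate, which suggests its own meter may accumulate `sum += val` instead:

# Minimal sketch of an AverageMeter; an assumption, not these projects' code.
class AverageMeter:
    """Tracks the most recent value and a running average."""
    def __init__(self):
        self.val = 0.0    # last value passed to update()
        self.sum = 0.0    # weighted sum of values
        self.count = 0    # total weight (usually sample count)
        self.avg = 0.0    # running average, sum / count

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count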
Code Example #3
def main():
    args = parse_args()

    # set random seed
    utils.seed_torch(42)

    model = PandaNet(arch=args.arch, pretrained=False)
    model_path = os.path.join(configure.MODEL_PATH,
                              f'{args.arch}_fold_{args.fold}_128_12.pth')

    model.load_state_dict(torch.load(model_path))
    model.cuda()

    df = pd.read_csv(configure.TRAIN_DF)

    dataset = PandaDataset(df=df, data_dir=configure.TRAIN_IMAGE_PATH)

    dataloader = DataLoader(dataset=dataset,
                            batch_size=args.batch_size,
                            num_workers=args.num_workers,
                            pin_memory=False,
                            shuffle=False)

    preds = predict(dataloader, model)
    score = utils.quadratic_weighted_kappa(preds, df['isup_grade'])
    print(score)
Code Example #4
def cls_val(eval_data_loader, model, criterion, ten_crop_data_loader):
	model.eval()
	tot_pred = np.array([], dtype=int)
	tot_label = np.array([], dtype=int)
	losses = AverageMeter()
	batch_time = AverageMeter()
	data_time = AverageMeter()
	end = time.time()
	for num_iter, (image, label) in enumerate(eval_data_loader):
		data_time.update(time.time() - end)
		final = model(Variable(image, requires_grad=False, volatile=True))
		if ten_crop_data_loader:
			for cropped_data_loader in ten_crop_data_loader:
				cropped_image = next(cropped_data_loader)
				final += model(Variable(cropped_image, requires_grad=False, volatile=True))
			final /= 11  # average the original prediction with the ten crop predictions
		loss = criterion(final, Variable(label.cuda()))
		_, pred = torch.max(final, 1)
		pred = pred.cpu().data.numpy().squeeze()
		label = label.cpu().numpy().squeeze()
		tot_pred = np.append(tot_pred, pred)
		tot_label = np.append(tot_label, label)
		losses.update(loss.data[0], image.size(0))
		kappa = quadratic_weighted_kappa(tot_label, tot_pred)
		batch_time.update(time.time() - end)
		end = time.time()
		print('Eval: [{0}/{1}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
		      'Data {data_time.avg:.3f}\t' 'Loss {loss.avg:.4f}\t'  'Kappa {kappa:.4f}\t'
		      .format(num_iter, len(eval_data_loader), batch_time=batch_time, data_time=data_time, loss=losses, kappa=kappa))

	return kappa, tot_pred, tot_label
Code Example #5
def cls_train(train_data_loader, model, criterion, optimizer, epoch, display):
	model.train()
	tot_pred = np.array([], dtype=int)
	tot_label = np.array([], dtype=int)
	batch_time = AverageMeter()
	data_time = AverageMeter()
	losses = AverageMeter()
	end = time.time()
	logger = []
	for num_iter, (image, label) in enumerate(train_data_loader):
		data_time.update(time.time() - end)
		final = model(Variable(image))
		loss = criterion(final, Variable(label.cuda()))
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()
		batch_time.update(time.time() - end)
		_, pred = torch.max(final, 1)
		pred = pred.cpu().data.numpy().squeeze()
		label = label.cpu().numpy().squeeze()
		tot_pred = np.append(tot_pred, pred)
		tot_label = np.append(tot_label, label)
		kappa = quadratic_weighted_kappa(tot_label, tot_pred)
		losses.update(loss.data[0], image.size(0))
		end = time.time()
		if num_iter % display == 0:
			print('Epoch: [{0}][{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
			      'Data {data_time.avg:.3f}\t' 'Loss {loss.avg:.4f}\t'  'Kappa {kappa:.4f}\t'
			      .format(epoch, num_iter, len(train_data_loader), batch_time=batch_time, data_time=data_time, loss=losses, kappa=kappa))
			# 'Accuracy {accuracy:.2f}\t' accuracy=100 * (tot_pred == tot_label).sum() / len(tot_label) Not good for unbalanced classes
			logger.append('Epoch: [{0}][{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
			              'Data {data_time.avg:.3f}\t' 'Loss {loss.avg:.4f}\t'  'Kappa {kappa:.4f}\t'
			              .format(epoch, num_iter, len(train_data_loader), batch_time=batch_time, data_time=data_time, loss=losses, kappa=kappa))
	return logger
Code Example #6
def cls_val(eval_data_loader, model, criterion, ten_crop_data_loader):
    model.eval()
    tot_pred = np.array([], dtype=int)
    tot_label = np.array([], dtype=int)
    tot_image = np.array([])
    tot_prop = np.array([])
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    end = time.time()
    for num_iter, (image, label, name) in enumerate(eval_data_loader):
        data_time.update(time.time() - end)
        final = model(Variable(image, requires_grad=False, volatile=True))
        if ten_crop_data_loader:
            for cropped_data_loader in ten_crop_data_loader:
                cropped_image = next(cropped_data_loader)
                final += model(
                    Variable(cropped_image, requires_grad=False,
                             volatile=True))
            final /= 11  # average the original prediction with the ten crop predictions
        loss = criterion(final, Variable(label.cuda()))
        _, pred = torch.max(final, 1)
        pred = pred.cpu().data.numpy().squeeze()
        label = label.cpu().numpy().squeeze()
        tot_pred = np.append(tot_pred, pred)
        tot_label = np.append(tot_label, label)
        tot_image = np.append(tot_image, name)
        m = torch.nn.Softmax(dim=1)
        prop = m(final).data.cpu().numpy()
        prop = [str(p) for p in prop]
        tot_prop = np.append(tot_prop, prop)
        losses.update(loss.data[0], image.size(0))
        kappa = quadratic_weighted_kappa(tot_label, tot_pred)
        batch_time.update(time.time() - end)
        end = time.time()
        print('Eval: [{0}/{1}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Data {data_time.avg:.3f}\t'
              'Loss {loss.avg:.4f}\t'
              'Kappa {kappa:.4f}\t'.format(num_iter,
                                           len(eval_data_loader),
                                           batch_time=batch_time,
                                           data_time=data_time,
                                           loss=losses,
                                           kappa=kappa))
    data = np.column_stack((tot_image, tot_label, tot_pred, tot_prop))
    df = pd.DataFrame(
        data, columns=['images', 'gt_level', 'pred_level', 'cls_probability'])
    df.to_csv('./classification_result.csv')

    return kappa, tot_pred, tot_label
Code Example #7
File: main.py, Project: QtacierP/nni_workstation
def classify_val(epoch):
    global train_dataloader
    global val_dataloader
    global test_dataloader
    global model
    global loss_func
    global optimizer
    global best_metric
    model.eval()
    c_matrix = np.zeros((base_config['n_classes'], base_config['n_classes']),
                        dtype=int)
    torch.set_grad_enabled(False)
    corrects = 0
    total = 0
    test_loss = 0
    for step, batch in enumerate(val_dataloader):
        x, y = batch
        x = x.cuda()
        y = y.cuda()
        preds = model(x)
        loss = loss_func(preds, y)
        test_loss += loss.item()
        total += y.size(0)
        acc, correct = accuracy(preds, y, c_matrix)
        corrects += correct
    acc = corrects / total
    kappa = quadratic_weighted_kappa(c_matrix)
    test_loss = test_loss / total
    if base_config['metric'] == 'acc':
        metric = acc
    elif base_config['metric'] == 'kappa':
        metric = kappa
    elif 'loss' in base_config['metric']:
        metric = test_loss
    else:
        logging.error('{} metric is not supported now'.format(
            base_config['metric']))
        # fail fast: otherwise `metric` would be unbound below
        raise ValueError(base_config['metric'])
    if opt(metric, best_metric):
        model_path = os.path.join(base_config['model_path'], 'best_pt')
        print('Saving model to {}'.format(model_path))
        state = {
            'net': model.state_dict(),
            'acc': acc,
            'kappa': kappa,
            'loss': test_loss,
            'epoch': epoch,
        }
        torch.save(state, model_path)
        best_metric = metric
    return metric, best_metric
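
The `opt` callable in example #7 is likewise undefined in the snippet; it evidently decides whether the new metric beats `best_metric`, so its direction must depend on the configured metric (higher is better for accuracy and kappa, lower is better for loss). A hypothetical one-liner consistent with that logic:

import operator

# Assumed definition: pick the comparison direction from the configured metric.
opt = operator.lt if 'loss' in base_config['metric'] else operator.gt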
Code Example #8
def eval_training(epoch, dataLoader):
    net.eval()

    test_loss = 0.0  # cost function error
    correct = 0.0
    preds_list = np.zeros(shape=(0, args.num_classes))
    label_list = []
    for (images, labels) in dataLoader:
        images = Variable(images)
        labels = Variable(labels)
        label_list.extend(labels)
        images = images.cuda()
        labels = labels.cuda()

        with torch.no_grad():
            outputs = net(images)
        preds_list = np.concatenate((preds_list, outputs.data.cpu().numpy()))
        loss = loss_function(outputs, labels)
        test_loss += loss.item()
        _, preds = outputs.max(1)
        correct += preds.eq(labels).sum()

    kappa = quadratic_weighted_kappa(preds_list, label_list)
    y_pred = np.argmax(preds_list, axis=1)
    y_pred_fix = fix_predict(y_pred)
    fix_correct = torch.Tensor(y_pred_fix)
    t_label_list = torch.Tensor(label_list)
    fix_correct = fix_correct.eq(t_label_list).sum()
    fix_kappa = metrics.cohen_kappa_score(y_pred_fix,
                                          label_list,
                                          weights='quadratic')
    print(
        'Test set: Average loss: {:.4f}, Accuracy: {:.4f}, Kappa: {kappa:.4f}, fix_kappa: {fix_kappa:.4f}, fix_Accuracy: {fix_Accuracy:.4f}'
        .format(test_loss / len(dataLoader.dataset),
                correct.float() / len(dataLoader.dataset),
                kappa=kappa,
                fix_kappa=fix_kappa,
                fix_Accuracy=fix_correct / len(dataLoader.dataset)))
    print()

    # add information to tensorboard
    writer.add_scalar('Test/Average loss', test_loss / len(dataLoader.dataset),
                      epoch)
    writer.add_scalar('Test/Accuracy',
                      correct.float() / len(dataLoader.dataset), epoch)

    return correct.float() / len(dataLoader.dataset), kappa, preds_list
Code Example #9
 def test(self, test_dataloader):
     c_matrix = np.zeros((self.args.n_classes, self.args.n_classes), dtype=int)
     self.model.eval()
     torch.set_grad_enabled(False)
     total = 0
     correct = 0
     logits = []
     labels = []
     for test_data in test_dataloader:
         x, y = test_data
         x, y = x.cuda(), y.long().cuda()
         y_pred = self.model(x)
         total += y.size(0)
         logits += y_pred.cpu().tolist()
         labels += y.cpu().tolist()
         correct += accuracy(y_pred, y, c_matrix, regression=self.args.regression) * y.size(0)
     acc = round(correct / total, 4)
     kappa = quadratic_weighted_kappa(c_matrix)
     print('')
     print(c_matrix)
     self.model.train()
     torch.set_grad_enabled(True)
     return acc, kappa
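
Examples #7 and #9 call `quadratic_weighted_kappa` with a confusion matrix rather than label vectors, so those projects ship a second variant. A sketch of what such a function plausibly computes, assuming `conf_mat[i, j]` counts samples of true class i predicted as class j:

import numpy as np

# Sketch only: QWK computed directly from an n x n confusion matrix.
def quadratic_weighted_kappa(conf_mat):
    conf_mat = np.asarray(conf_mat, dtype=float)
    n = conf_mat.shape[0]
    idx = np.arange(n)
    # quadratic disagreement weights: w[i, j] = (i - j)^2 / (n - 1)^2
    w = (idx[:, None] - idx[None, :]) ** 2 / (n - 1) ** 2
    # expected counts under independent marginals, scaled to the same total
    expected = np.outer(conf_mat.sum(axis=1), conf_mat.sum(axis=0)) / conf_mat.sum()
    return 1.0 - (w * conf_mat).sum() / (w * expected).sum()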
Code Example #10
    except:
        continue

dict_pred = {}

df = pd.read_csv(pred_csv, index_col=0)  # DataFrame.from_csv was removed from pandas
for index, row in df.iterrows():
    dict_pred[row['image']] = row['dr_level']

list_gt = []
list_pred = []

for key in dict_pred.keys():
    list_gt.append(dict_gt[key])
    list_pred.append(dict_pred[key])

np_gt = np.array(list_gt)
np_pred = np.array(list_pred)

dr_kappa = quadratic_weighted_kappa(np_gt, np_pred)

dr_confusion_matrix = str(confusion_matrix(np_gt, np_pred))

out_file = os.path.join(root, 'kappa2.txt')

with open(out_file, 'w') as f:
    f.write('====>kappa: {}\n'.format(dr_kappa))
    f.write('===> Confusion Matrix:\n')
    f.write(dr_confusion_matrix)
    f.write('\n\n')
Code Example #11
def eval(eval_data_loader, model, criterion):
    model.eval()
    tot_pred_dr = np.array([], dtype=int)
    tot_label_dr = np.array([], dtype=int)
    tot_pred_dme = np.array([], dtype=int)
    tot_label_dme = np.array([], dtype=int)
    batch_time = AverageMeter()
    data_time = AverageMeter()
    accuracy = AverageMeter()
    losses_dr = AverageMeter()
    losses_dme = AverageMeter()
    losses = AverageMeter()
    end = time.time()
    logger = []
    for index, (image, label_dr, label_dme) in enumerate(eval_data_loader):
        data_time.update(time.time() - end)
        o_dr, o_dme = model(Variable(image.cuda()))
        loss_dr = criterion(o_dr, Variable(label_dr.cuda()))
        loss_dme = criterion(o_dme, Variable(label_dme.cuda()))
        loss = 0.5 * loss_dr + 0.5 * loss_dme
        batch_time.update(time.time() - end)
        _, pred_dr = torch.max(o_dr, 1)
        _, pred_dme = torch.max(o_dme, 1)
        pred_dr = pred_dr.cpu().data.numpy().squeeze()
        label_dr = label_dr.numpy().squeeze()
        pred_dme = pred_dme.cpu().data.numpy().squeeze()
        label_dme = label_dme.numpy().squeeze()

        tot_pred_dr = np.append(tot_pred_dr, pred_dr)
        tot_label_dr = np.append(tot_label_dr, label_dr)
        tot_pred_dme = np.append(tot_pred_dme, pred_dme)
        tot_label_dme = np.append(tot_label_dme, label_dme)

        # update the running loss meters
        losses_dr.update(loss_dr.data[0], len(image))
        losses_dme.update(loss_dme.data[0], len(image))
        losses.update(loss.data[0], len(image))

        dr_accuracy = np.equal(tot_pred_dr, tot_label_dr).sum() / len(tot_pred_dr)
        dme_accuracy = np.equal(tot_pred_dme, tot_label_dme).sum() / len(tot_pred_dme)
        dr_kappa = quadratic_weighted_kappa(tot_label_dr, tot_pred_dr)
        dme_kappa = quadratic_weighted_kappa(tot_label_dme, tot_pred_dme)
        print_info = 'Eval: [{iter}/{tot}]\t' \
                     'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                     'Data {data_time.avg:.3f}\t ' \
                     'Loss {loss.avg:.4f}\t' \
                     'DR_Loss {dr_loss.avg:.4f}\t' \
                     'DME_Loss {dme_loss.avg:.4f}\t' \
                     'DR_Kappa {dr_kappa:.4f}\t' \
                     'DR_Accuracy {dr_acc:.4f}\t' \
                     'DME_Kappa {dme_kappa:.4f}\t' \
                     'DME_Accuracy {dme_acc:.4f}\t'.format(iter=index, tot=len(eval_data_loader),
                                                           batch_time=batch_time,
                                                           data_time=data_time,
                                                           loss=losses,
                                                           dr_loss=losses_dr,
                                                           dme_loss=losses_dme,
                                                           dr_acc=dr_accuracy,
                                                           dme_acc=dme_accuracy,
                                                           dr_kappa=dr_kappa,
                                                           dme_kappa=dme_kappa
                                                           )
        print(print_info)
        logger.append(print_info)

    return logger, dr_kappa, dme_kappa, tot_pred_dr, tot_label_dr, tot_pred_dme, tot_label_dme
Code Example #12
File: main.py, Project: jayden199012/prt
# output transformation for regression-based models
# =============================================================================
# find the offset
offsets = fmin_powell(train_offset,
                      x0, (target, train_y_pred),
                      maxiter=20000,
                      disp=True)

# in case you need one more round of minimization
offsets = fmin_powell(train_offset,
                      offsets, (y, train_y_pred),
                      maxiter=20000,
                      disp=True)

# evaluate
train_y_pred = digit(offsets, train_y_pred)
quadratic_weighted_kappa(target, train_y_pred)

valid_y_pred = digit(offsets, valid_y_pred)
quadratic_weighted_kappa(valid_target, valid_y_pred)

# final predict
y_pred = np.asarray(digit(offsets, test_pred))

# =============================================================================
# submit
# =============================================================================
submission = pd.read_csv('../data/test/sample_submission.csv', index_col=0)
submission['AdoptionSpeed'] = y_pred.astype('int32')
submission.to_csv('submit.csv')
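
Example #12's `train_offset` and `digit` come from the project and are not shown. The idea is standard for regression-based ordinal models: `digit` buckets continuous predictions with a set of cutoffs, and `train_offset` returns negated kappa so that `fmin_powell`, a minimizer, ends up maximizing agreement. A hypothetical sketch (names, `x0`, and details are assumptions):

import numpy as np
from sklearn.metrics import cohen_kappa_score

def digit(offsets, y_pred):
    # bucket continuous predictions: index of the first cutoff above each value
    return np.digitize(np.asarray(y_pred), np.sort(np.asarray(offsets)))

def train_offset(offsets, y_true, y_pred):
    # negated QWK, so minimizing it maximizes kappa
    return -cohen_kappa_score(y_true, digit(offsets, y_pred), weights='quadratic')

# initial cutoffs for a 5-class ordinal target such as AdoptionSpeed (0-4)
x0 = np.array([0.5, 1.5, 2.5, 3.5])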
Code Example #13
def train_bin(train_data_loader, model, criterion, optimizer, epoch, display):
    model.train()
    tot_pred_dr = np.array([], dtype=int)
    tot_label_dr = np.array([], dtype=int)
    tot_pred_dme = np.array([], dtype=int)
    tot_label_dme = np.array([], dtype=int)
    tot_pred_bin = np.array([], dtype=int)
    tot_label_bin = np.array([], dtype=int)
    batch_time = AverageMeter()
    data_time = AverageMeter()
    accuracy = AverageMeter()
    losses_dr = AverageMeter()
    losses_dme = AverageMeter()
    losses_bin = AverageMeter()
    losses = AverageMeter()
    end = time.time()
    logger = []
    for index, (image, label_dr, label_dme, label_bin) in enumerate(train_data_loader):
        data_time.update(time.time() - end)
        o_dr, o_dme, o_bin = model(Variable(image.cuda()))
        loss_dr = criterion(o_dr, Variable(label_dr.cuda()))
        loss_dme = criterion(o_dme, Variable(label_dme.cuda()))
        loss_bin = criterion(o_bin, Variable(label_bin.cuda()))
        loss = loss_bin
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update(time.time() - end)
        _, pred_dr = torch.max(o_dr, 1)
        _, pred_dme = torch.max(o_dme, 1)
        _, pred_bin = torch.max(o_bin, 1)
        pred_dr = pred_dr.cpu().data.numpy().squeeze()
        label_dr = label_dr.numpy().squeeze()
        pred_dme = pred_dme.cpu().data.numpy().squeeze()
        label_dme = label_dme.numpy().squeeze()
        pred_bin = pred_bin.cpu().data.numpy().squeeze()
        label_bin = label_bin.numpy().squeeze()

        tot_pred_dr = np.append(tot_pred_dr, pred_dr)
        tot_label_dr = np.append(tot_label_dr, label_dr)
        tot_pred_dme = np.append(tot_pred_dme, pred_dme)
        tot_label_dme = np.append(tot_label_dme, label_dme)
        tot_pred_bin = np.append(tot_pred_bin, pred_bin)
        tot_label_bin = np.append(tot_label_bin, label_bin)


        # update the running loss meters
        losses_dr.update(loss_dr.data[0], len(image))
        losses_dme.update(loss_dme.data[0], len(image))
        losses_bin.update(loss_bin.data[0], len(image))
        losses.update(loss.data[0], len(image))

        accuracy.update(np.equal(pred_bin, label_bin).sum() / len(label_bin), len(label_bin))


        if index % display == 0:
            dr_accuracy = np.equal(tot_pred_dr, tot_label_dr).sum()/len(tot_pred_dr)
            dme_accuracy = np.equal(tot_pred_dme, tot_label_dme).sum()/len(tot_pred_dme)
            dr_kappa = quadratic_weighted_kappa(tot_label_dr, tot_pred_dr)
            dme_kappa = quadratic_weighted_kappa(tot_label_dme, tot_pred_dme)
            print_info = 'Epoch: [{epoch}][{iter}/{tot}]\t' \
                         'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                         'Data {data_time.avg:.3f}\t ' \
                         'Loss {loss.avg:.4f}\t' \
                         'DR_Loss {dr_loss.avg:.4f}\t' \
                         'DME_Loss {dme_loss.avg:.4f}\t' \
                         'DR_Kappa {dr_kappa:.4f}\t' \
                         'DR_Accuracy {dr_acc:.4f}\t' \
                         'DME_Kappa {dme_kappa:.4f}\t' \
                         'DME_Accuracy {dme_acc:.4f}\t' \
                         'To_Treat_Accuracy {accuracy.avg:.4f}\t'.format(epoch=epoch, iter=index, tot=len(train_data_loader),
                                                               batch_time=batch_time,
                                                               data_time=data_time,
                                                               loss=losses,
                                                               dr_loss=losses_dr,
                                                               dme_loss=losses_dme,
                                                               dr_acc=dr_accuracy,
                                                               dme_acc=dme_accuracy,
                                                               dr_kappa=dr_kappa,
                                                               dme_kappa=dme_kappa,
                                                               accuracy=accuracy
                                                               )
            print(print_info)
            logger.append(print_info)

    return logger
Code Example #14
# =============================================================================
# Threshold-based models
# =============================================================================

# Intermediate Threshold Model
lad_model_IT = mord.LogisticIT(alpha=1, verbose=1, max_iter=5000)

# fit model
lad_model_IT.fit(x, y)

# predict
train_y_pred = lad_model_IT.predict(x)
y_pred = lad_model_IT.predict(test_x) + 1

# evaluate
quadratic_weighted_kappa(train_y_pred, y)

# All-Threshold Model
lad_model_AT = mord.LogisticAT(alpha=0.5, verbose=1, max_iter=5000)

# fit model
lad_model_AT.fit(x, y)

# predict
train_y_pred = lad_model_AT.predict(x)
y_pred = lad_model_AT.predict(test_x) + 1

# evaluate
quadratic_weighted_kappa(train_y_pred, y)
# =============================================================================
# Pure regression based