Example #1
def test(model, test_loader, losses, epoch, nr_query=Config.nr_query):
    '''
    Evaluate the model on the test set, log the results, and
    return (cmc, mAP).
    '''
    val_start_time = time.time()
    model.eval()
    logger.info('testing', 'Start testing')
    all_features, all_labels, all_cids = [], [], []
    history = collections.defaultdict(list)

    for i, (imgs, labels, cids, types,
            colors) in tqdm(enumerate(test_loader),
                            desc='testing on epoch-{}'.format(epoch),
                            total=len(test_loader)):
        imgs, labels, cids = imgs.cuda(), labels.cuda(), cids.cuda()
        types, colors = types.cuda(), colors.cuda()
        f_norm, p_type, p_color = model(imgs)

        triplet_hard_loss = losses['triplet_hard_loss'](f_norm, labels)

        acc_type = accuracy(p_type, types)[0]
        acc_color = accuracy(p_color, colors)[0]

        history['triplet_hard_loss'].append(float(triplet_hard_loss))
        history['acc_type'].append(float(acc_type))
        history['acc_color'].append(float(acc_color))

        all_features.append(f_norm.cpu().detach().numpy())
        all_labels.append(labels.cpu().detach().numpy())
        all_cids.append(cids.cpu().detach().numpy())

    all_features = np.concatenate(all_features, axis=0)
    all_labels = np.concatenate(all_labels, axis=0)
    all_cids = np.concatenate(all_cids, axis=0)
    q_f, g_f = all_features[:nr_query], all_features[nr_query:]
    q_ids, g_ids = all_labels[:nr_query], all_labels[nr_query:]
    q_cids, g_cids = all_cids[:nr_query], all_cids[nr_query:]

    print('Compute CMC and mAP')
    distance_matrix = get_L2distance_matrix_numpy(q_f, g_f)
    cmc, mAP = get_cmc_map(distance_matrix, q_ids, g_ids, q_cids, g_cids)
    val_end_time = time.time()
    time_spent = sec2min_sec(val_start_time, val_end_time)

    text = 'Finish testing epoch {:>3}, time spent: [{:>3}mins{:>3}s], performance:\n##'.format(
        epoch, time_spent[0], time_spent[1])
    text += '|CMC1:{:>5.4f} |mAP:{:>5.4f}'.format(cmc[0], mAP)
    for k, vlist in history.items():
        v = float(mean(vlist))
        text += '|{}:{:>5.4f} '.format(k, v)
        logger.add_scalar('TEST/' + k, v, epoch)
    logger.info('testing', text)

    logger.add_scalar('TEST/cmc1', cmc[0], epoch)
    logger.add_scalar('TEST/cmc5', cmc[4], epoch)
    logger.add_scalar('TEST/cmc10', cmc[9], epoch)
    logger.add_scalar('TEST/mAP', mAP, epoch)
    return cmc, mAP
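
For reference, a minimal sketch of the distance helper this example assumes. The actual get_L2distance_matrix_numpy may differ; this version computes the pairwise Euclidean distance between the query and gallery feature matrices using the usual expansion of ||q - g||^2.

import numpy as np

def get_L2distance_matrix_numpy(q_f, g_f):
    # q_f: (nq, d) query features; g_f: (ng, d) gallery features.
    # Returns the (nq, ng) matrix of Euclidean distances.
    q_sq = np.sum(q_f ** 2, axis=1, keepdims=True)    # (nq, 1)
    g_sq = np.sum(g_f ** 2, axis=1, keepdims=True).T  # (1, ng)
    dist_sq = q_sq + g_sq - 2.0 * q_f @ g_f.T
    # clamp tiny negatives from floating-point error before the sqrt
    return np.sqrt(np.maximum(dist_sq, 0.0))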
Example #2
def prepare(args):
    resume_from_checkpoint = args.resume_from_checkpoint

    prepare_start_time = time.time()
    logger.info('global', 'Start preparing.')
    check_config_dir()
    logger.info('setting', config_info(), time_report=False)

    model = Baseline(num_classes=Config.nr_class)
    logger.info('setting', model_summary(model), time_report=False)
    logger.info('setting', str(model), time_report=False)

    train_transforms = transforms.Compose([
        transforms.Resize(Config.input_shape),
        transforms.RandomApply(
            [transforms.ColorJitter(
                brightness=0.3, contrast=0.3, saturation=0.3, hue=0)],
            p=0.5),
        transforms.RandomHorizontalFlip(),
        transforms.Pad(10),
        transforms.RandomCrop(Config.input_shape),
        transforms.ToTensor(),
        transforms.RandomErasing(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    test_transforms = transforms.Compose([
        transforms.Resize(Config.input_shape),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    trainset = Veri776_train(transforms=train_transforms, need_attr=True)
    testset = Veri776_test(transforms=test_transforms, need_attr=True)

    pksampler = PKSampler(trainset, p=Config.P, k=Config.K)
    train_loader = torch.utils.data.DataLoader(trainset,
                                               batch_size=Config.batch_size,
                                               sampler=pksampler,
                                               num_workers=Config.nr_worker,
                                               pin_memory=True)
    test_loader = torch.utils.data.DataLoader(
        testset,
        batch_size=Config.batch_size,
        sampler=torch.utils.data.SequentialSampler(testset),
        num_workers=Config.nr_worker,
        pin_memory=True)

    weight_decay_setting = parm_list_with_Wdecay(model)
    optimizer = torch.optim.Adam(weight_decay_setting, lr=Config.lr)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
                                                  lr_lambda=lr_multi_func)

    losses = {}
    losses['cross_entropy_loss'] = torch.nn.CrossEntropyLoss()
    losses['type_ce_loss'] = torch.nn.CrossEntropyLoss()
    losses['color_ce_loss'] = torch.nn.CrossEntropyLoss()
    losses['triplet_hard_loss'] = triplet_hard_loss(
        margin=Config.triplet_margin)

    for k in losses.keys():
        losses[k] = losses[k].cuda()

    start_epoch = 0
    if resume_from_checkpoint and os.path.exists(Config.checkpoint_path):
        checkpoint = load_checkpoint()
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])

    # resume from the epoch after the checkpoint, or simply start from epoch 1
    start_epoch += 1

    ret = {
        'start_epoch': start_epoch,
        'model': model,
        'train_loader': train_loader,
        'test_loader': test_loader,
        'optimizer': optimizer,
        'scheduler': scheduler,
        'losses': losses
    }

    prepare_end_time = time.time()
    time_spent = sec2min_sec(prepare_start_time, prepare_end_time)
    logger.info(
        'global', 'Finish preparing, time spent: {}mins {}s.'.format(
            time_spent[0], time_spent[1]))

    return ret
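
prepare() relies on a parm_list_with_Wdecay helper that is not shown. A common implementation in re-ID baselines, sketched below under the assumption that Config.weight_decay exists, builds two parameter groups so that biases and BatchNorm parameters are exempt from weight decay:

def parm_list_with_Wdecay(model):
    # Split parameters into two groups: 1-D tensors (biases, BN
    # scales/shifts) get no weight decay; everything else uses the
    # configured value.
    decay, no_decay = [], []
    for param in model.parameters():
        if not param.requires_grad:
            continue
        (no_decay if param.ndim == 1 else decay).append(param)
    return [
        {'params': decay, 'weight_decay': Config.weight_decay},
        {'params': no_decay, 'weight_decay': 0.0},
    ]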
Example #3
def start(
    model,
    train_loader,
    test_loader,
    optimizer,
    scheduler,
    losses,
    start_epoch,
):
    train_start_time = time.time()

    best_mAP = 0.0
    best_mAP_epoch = 0
    best_top1 = 0.0
    best_top1_epoch = 0

    logger.info('global', 'Start training.')
    for epoch in range(start_epoch, Config.epoch + 1):
        train_one_epoch(model, train_loader, losses, optimizer, scheduler,
                        epoch)

        if epoch % Config.epoch_per_test == 0:
            cmc, mAP = test(model, test_loader, losses, epoch)
            top1 = cmc[0]
            if top1 > best_top1:
                best_top1 = top1
                best_top1_epoch = epoch
            if mAP > best_mAP:
                best_mAP = mAP
                best_mAP_epoch = epoch

        if epoch % Config.epoch_per_save == 0:
            # run the test here if this epoch was not already a test
            # epoch, so that cmc and mAP below are defined and current
            if epoch % Config.epoch_per_test != 0:
                cmc, mAP = test(model, test_loader, losses, epoch)
            file_name = 'epoch-{:0>3}'.format(epoch) + '.pth'
            save_dict = {
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict(),
                'top1': cmc[0],
                'mAP': mAP
            }
            path = os.path.join(Config.model_dir, file_name)
            logger.info('global', 'Save model to {}'.format(path))
            torch.save(save_dict, path)

        save_dict = {
            'epoch': epoch,
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'scheduler': scheduler.state_dict()
        }
        save_checkpoint(save_dict)
    train_end_time = time.time()
    time_spent = sec2min_sec(train_start_time, train_end_time)

    text = 'Finish training, time spent: {:>3}mins {:>3}s'.format(
        time_spent[0], time_spent[1])
    logger.info('global', text)
    text = '##FINISH## best mAP:{:>5.4f} in epoch {:>3}; best top1:{:>5.4f} in epoch {:>3}'.format(
        best_mAP, best_mAP_epoch, best_top1, best_top1_epoch)
    logger.info('global', text)
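
The checkpoint helpers used by prepare() and start() are not shown either; plausible minimal counterparts, assuming Config.checkpoint_path, would be:

def save_checkpoint(state, path=Config.checkpoint_path):
    # Persist the rolling "latest" checkpoint used for resuming.
    torch.save(state, path)

def load_checkpoint(path=Config.checkpoint_path):
    # Load onto CPU first; state dicts are moved when applied.
    return torch.load(path, map_location='cpu')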
Example #4
def train_one_epoch(model, train_loader, losses, optimizer, scheduler, epoch):
    global batch_step

    epoch_start_time = time.time()
    logger.info(
        'training', 'Start training epoch-{}, lr={:.6}'.format(
            epoch, get_lr_from_optim(optimizer)))

    scaler = amp.GradScaler()
    model.train()
    history = collections.defaultdict(list)
    for i, (imgs, labels, _, types, colors) in enumerate(train_loader):

        batch = i + 1
        batch_start_time = time.time()

        imgs, labels = imgs.cuda(), labels.cuda()
        types, colors = types.cuda(), colors.cuda()

        with amp.autocast():
            f_bn, p, p_type, p_color = model(imgs)
            ce_loss = losses['cross_entropy_loss'](p, labels)
            triplet_hard_loss = losses['triplet_hard_loss'](f_bn, labels)
            type_ce_loss = losses['type_ce_loss'](p_type, types)
            color_ce_loss = losses['color_ce_loss'](p_color, colors)
            loss = Config.weight_ce * ce_loss
            loss += Config.weight_tri * triplet_hard_loss
            loss += Config.w_type * type_ce_loss
            loss += Config.w_color * color_ce_loss

        scaler.scale(loss).backward()

        scaler.step(optimizer)
        scaler.update()

        optimizer.zero_grad()

        acc = accuracy(p, labels)[0]
        acc_type = accuracy(p_type, types)[0]
        acc_color = accuracy(p_color, colors)[0]
        batch_end_time = time.time()
        time_spent = batch_end_time - batch_start_time

        dist_ap, dist_an = losses['triplet_hard_loss'].get_mean_hard_dist()
        perform = {
            'ce': float(Config.weight_ce * ce_loss),
            'tri_h': float(Config.weight_tri * triplet_hard_loss),
            'type_ce': float(Config.w_type * type_ce_loss),
            'color_ce': float(Config.w_color * color_ce_loss),
            'dap_h': float(dist_ap),
            'dan_h': float(dist_an),
            'acc': float(acc),
            'acc_type': float(acc_type),
            'acc_color': float(acc_color),
            'time(s)': float(time_spent)
        }

        if i % Config.batch_per_log == 0:
            stage = (epoch, batch)
            text = ''
            for k, v in perform.items():
                text += '|{}:{:<8.4f} '.format(k, float(v))
            logger.info('training', text, stage=stage)

        for k, v in perform.items():
            history[k].append(float(v))
            if k != 'time(s)':
                logger.add_scalar('TRAIN_b/' + k, v, batch_step)
        batch_step += 1

    scheduler.step()

    epoch_end_time = time.time()
    time_spent = sec2min_sec(epoch_start_time, epoch_end_time)

    text = 'Finish training epoch {}, time spent: {}mins {}secs, performance:\n##'.format(
        epoch, time_spent[0], time_spent[1])
    for k, vlist in history.items():
        v = mean(vlist)
        text += '|{}:{:>5.4f} '.format(k, v)
        if k != 'time(s)':
            logger.add_scalar('TRAIN_e/' + k, v, epoch)
    logger.info('training', text)
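
The triplet_hard_loss object used above is never defined in these examples. A minimal sketch of the batch-hard variant (Hermans et al., "In Defense of the Triplet Loss") matching the two-argument signature and the get_mean_hard_dist() accessor used here might look like this; the variants in the other examples take extra arguments and would differ:

import torch
import torch.nn as nn

class triplet_hard_loss(nn.Module):
    def __init__(self, margin=0.3):
        super().__init__()
        self.margin = margin
        self.mean_dist_ap = 0.0
        self.mean_dist_an = 0.0

    def forward(self, features, labels):
        # (n, n) pairwise Euclidean distances within the batch
        dist = torch.cdist(features, features, p=2)
        same = labels.unsqueeze(0) == labels.unsqueeze(1)
        # hardest positive: farthest same-id sample per anchor
        dist_ap = dist.masked_fill(~same, float('-inf')).max(dim=1).values
        # hardest negative: closest different-id sample per anchor
        # (PK sampling guarantees both positives and negatives exist)
        dist_an = dist.masked_fill(same, float('inf')).min(dim=1).values
        self.mean_dist_ap = dist_ap.mean().detach()
        self.mean_dist_an = dist_an.mean().detach()
        return torch.relu(dist_ap - dist_an + self.margin).mean()

    def get_mean_hard_dist(self):
        return self.mean_dist_ap, self.mean_dist_an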
Example #5
def test(model, test_loader, losses, epoch, nr_query=Config.nr_query):
    '''
    Evaluate the model on the test set with and without the attention
    distance, log the results, and return (cmc_a, mAP_a, cmc, mAP).
    '''
    val_start_time = time.time()
    model.eval()
    logger.info('testing', 'Start testing')
    all_features_gpu, all_labels, all_cids, all_mask_gpu = [], [], [], []
    all_attn_dist = []
    history = collections.defaultdict(list)

    for i, (imgs, labels,
            cids) in tqdm(enumerate(test_loader),
                          desc='testing on epoch-{}'.format(epoch),
                          total=len(test_loader)):
        imgs, labels, cids = imgs.cuda(), labels.cuda(), cids.cuda()
        f_norm, f_mask = model(imgs)
        triplet_hard_loss, dist = losses['triplet_hard_loss'](f_norm, f_mask,
                                                              labels)
        history['triplet_hard_loss'].append(float(triplet_hard_loss))
        all_features_gpu.append(f_norm)
        all_labels.append(labels.cpu().detach().numpy())
        all_cids.append(cids.cpu().detach().numpy())
        all_mask_gpu.append(f_mask)
        all_attn_dist.append(dist.cpu().detach().numpy())

    all_features_gpu = torch.cat(all_features_gpu, axis=0)
    all_features = all_features_gpu.cpu().detach().numpy()
    all_labels = np.concatenate(all_labels, axis=0)
    all_cids = np.concatenate(all_cids, axis=0)
    all_mask_gpu = torch.cat(all_mask_gpu, axis=0)

    q_f_gpu, g_f_gpu = all_features_gpu[:nr_query], all_features_gpu[nr_query:]
    q_f, g_f = all_features[:nr_query], all_features[nr_query:]
    q_ids, g_ids = all_labels[:nr_query], all_labels[nr_query:]
    q_cids, g_cids = all_cids[:nr_query], all_cids[nr_query:]
    q_mask, g_mask = all_mask_gpu[:nr_query], all_mask_gpu[nr_query:]

    print('Compute CMC and mAP')
    attn_distance_matrix = get_L2distance_matrix_attn_batch(
        q_f_gpu, g_f_gpu, q_mask, g_mask, temp=Config.temperature)
    attn_distance_matrix = attn_distance_matrix.cpu().detach().numpy()
    distance_matrix = get_L2distance_matrix_numpy(q_f, g_f)
    cmc_a, mAP_a = get_cmc_map(attn_distance_matrix, q_ids, g_ids, q_cids,
                               g_cids)
    cmc, mAP = get_cmc_map(distance_matrix, q_ids, g_ids, q_cids, g_cids)
    val_end_time = time.time()
    time_spent = sec2min_sec(val_start_time, val_end_time)

    text = 'Finish testing epoch {:>3}, time spent: [{:>3}mins{:>3}s], performance:\n##'.format(
        epoch, time_spent[0], time_spent[1])
    text += 'With ATTENTION> |CMC1:{:>5.4f} |mAP:{:>5.4f}  '.format(
        cmc_a[0], mAP_a)
    text += 'W/O attention> |CMC1:{:>5.4f} |mAP:{:>5.4f}'.format(cmc[0], mAP)
    for k, vlist in history.items():
        v = float(mean(vlist))
        text += '|{}:{:>5.4f} '.format(k, v)
        logger.add_scalar('TEST/' + k, v, epoch)
    logger.info('testing', text)

    logger.add_scalar('TEST/cmc1_a', cmc_a[0], epoch)
    logger.add_scalar('TEST/cmc5_a', cmc_a[4], epoch)
    logger.add_scalar('TEST/mAP_a', mAP_a, epoch)
    logger.add_scalar('TEST/cmc1', cmc[0], epoch)
    logger.add_scalar('TEST/cmc5', cmc[4], epoch)
    logger.add_scalar('TEST/mAP', mAP, epoch)
    return cmc_a, mAP_a, cmc, mAP
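
get_cmc_map is also assumed throughout. A compact sketch of the standard re-ID protocol it presumably implements (drop gallery entries that share both identity and camera with the query, then accumulate CMC and mean average precision) is:

import numpy as np

def get_cmc_map(dist, q_ids, g_ids, q_cids, g_cids, max_rank=50):
    indices = np.argsort(dist, axis=1)          # ascending distance
    all_cmc, sum_AP, valid_q = [], 0.0, 0
    for qi in range(dist.shape[0]):
        order = indices[qi]
        # ignore gallery samples sharing both the query's identity
        # and its camera, per the usual evaluation protocol
        keep = ~((g_ids[order] == q_ids[qi]) & (g_cids[order] == q_cids[qi]))
        raw = (g_ids[order][keep] == q_ids[qi]).astype(np.float32)
        if not raw.any():
            continue                            # id absent from gallery
        cmc = raw.cumsum()
        cmc[cmc > 1] = 1
        all_cmc.append(cmc[:max_rank])          # assumes gallery > max_rank
        precision = raw.cumsum() / (np.arange(raw.size) + 1.0)
        sum_AP += (precision * raw).sum() / raw.sum()
        valid_q += 1
    return np.stack(all_cmc).mean(axis=0), sum_AP / valid_q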
Example #6
def train_one_epoch(model, branches, nr_mask, train_loader, losses, optimizer,
                    scheduler, epoch):
    global batch_step

    epoch_start_time = time.time()
    logger.info(
        'training', 'Start training epoch-{}, lr={:.6}'.format(
            epoch, get_lr_from_optim(optimizer)))

    scaler = amp.GradScaler()
    model.train()
    for branch in branches:
        branch.train()
    history = collections.defaultdict(list)
    for i, (imgs, labels, masks) in enumerate(train_loader):

        batch = i + 1
        batch_start_time = time.time()

        imgs, labels, masks = imgs.cuda(), labels.cuda(), masks.float().cuda()

        with amp.autocast():
            loss = 0
            parsing_celoss = [0] * nr_mask
            parsing_triloss = [0] * nr_mask
            x = model(imgs)
            weights = get_weight(masks)
            f_main, p = branches[0](x)
            ce_loss = losses['cross_entropy_loss'][0](p, labels)
            triplet_hard_loss = losses['triplet_hard_loss'][0](f_main, labels)
            loss += Config.weight_ce[0] * ce_loss
            loss += Config.weight_tri[0] * triplet_hard_loss
            for b, branch in enumerate(branches):
                if b == 0:  # main branch
                    continue
                mask = masks[:, b - 1:b, ...]
                f, logit = branch(x, mask)
                w = weights[:, b - 1]
                pce_loss = losses['cross_entropy_loss'][b](logit, labels, w)
                # ptriplet_hard_loss = losses['triplet_hard_loss'][b](
                #     f, w, labels)
                # ptriplet_hard_loss = losses['triplet_hard_loss'][0](
                #     f, labels)
                parsing_celoss[b - 1] = Config.weight_ce[b] * pce_loss
                # parsing_triloss += Config.weight_tri[b] * ptriplet_hard_loss
            loss = loss + sum(parsing_celoss) + sum(parsing_triloss)

        scaler.scale(loss).backward()

        scaler.step(optimizer)
        scaler.update()

        optimizer.zero_grad()

        acc = accuracy(p, labels)[0]
        batch_end_time = time.time()
        time_spent = batch_end_time - batch_start_time
        dist_ap, dist_an = losses['triplet_hard_loss'][0].get_mean_hard_dist()
        perform = {}
        # note: the loop index must not be named i, or it would clobber
        # the batch index used by the logging condition below
        for m in range(nr_mask):
            if m == 2:
                continue  # skip top-side loss
            perform['p%d_ce' % (m + 1)] = float(parsing_celoss[m])
        for m in range(nr_mask):
            if m == 2:
                continue  # skip top-side loss
            perform['p%d_tri' % (m + 1)] = float(parsing_triloss[m])
        perform.update({
            'ce': float(Config.weight_ce[0] * ce_loss),
            'tri': float(Config.weight_tri[0] * triplet_hard_loss),
            'dap': float(dist_ap),
            'dan': float(dist_an),
            'acc': float(acc),
            't': float(time_spent)
        })

        if i % Config.batch_per_log == 0:
            stage = (epoch, batch)
            text = ''
            for k, v in perform.items():
                text += '|{}:{:<6.4f} '.format(k, float(v))
            logger.info('training', text, stage=stage)

        for k, v in perform.items():
            history[k].append(float(v))
            if k != 't':  # the time key in this example is 't', not 'time(s)'
                logger.add_scalar('TRAIN_b/' + k, v, batch_step)
        batch_step += 1

    scheduler.step()

    epoch_end_time = time.time()
    time_spent = sec2min_sec(epoch_start_time, epoch_end_time)

    text = 'Finish training epoch {}, time spent: {}mins {}secs, performance:\n##'.format(
        epoch, time_spent[0], time_spent[1])
    for k, vlist in history.items():
        v = mean(vlist)
        text += '|{}:{:>5.4f} '.format(k, v)
        if k != 't':
            logger.add_scalar('TRAIN_e/' + k, v, epoch)
    logger.info('training', text)
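
The accuracy helper used in every example is also assumed; a sketch following the torchvision reference implementation:

def accuracy(output, target, topk=(1,)):
    # Top-k classification accuracy in percent, one value per k.
    with torch.no_grad():
        maxk = max(topk)
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        return [correct[:k].reshape(-1).float().sum(0) * (100.0 / target.size(0))
                for k in topk]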
Example #7
def test(model,
         bclassifier,
         test_loader,
         losses,
         epoch,
         nr_query=Config.nr_query):
    '''
    Evaluate the model and the pair classifier on the test set,
    log the results, and return (cmc, mAP, acc).
    '''
    val_start_time = time.time()
    model.eval()
    bclassifier.eval()
    logger.info('testing', 'Start testing')
    all_features_gpu, all_labels, all_cids, all_mask = [], [], [], []

    for i, (imgs, labels, cids) in tqdm(enumerate(test_loader),
                                        desc='extracting features',
                                        total=len(test_loader)):
        imgs, labels, cids = imgs.cuda(), labels.cuda(), cids.cuda()
        f_norm, f_mask = model(imgs)
        all_features_gpu.append(f_norm)
        all_labels.append(labels)
        all_mask.append(f_mask)
        all_cids.append(cids)

    features = torch.cat(all_features_gpu, axis=0)
    alllabels = torch.cat(all_labels, axis=0)
    allmask = torch.cat(all_mask, axis=0)
    allcids = torch.cat(all_cids, axis=0)

    q_f_gpu, g_f_gpu = features[:nr_query, ...], features[nr_query:, ...]
    q_ids, g_ids = alllabels[:nr_query, ...], alllabels[nr_query:, ...]
    q_cids, g_cids = allcids[:nr_query], allcids[nr_query:]
    q_mask, g_mask = allmask[:nr_query], allmask[nr_query:]

    pk_pros = []
    for start in tqdm(range(0, q_f_gpu.shape[0], Config.eval_P),
                      desc='computing similarity'):
        end = min(start + Config.eval_P, q_f_gpu.shape[0])
        p_pros = []
        for kstart in range(0, g_f_gpu.shape[0], Config.eval_K):
            kend = min(kstart + Config.eval_K, g_f_gpu.shape[0])
            pros = bclassifier(q_f_gpu[start:end, ...],
                               g_f_gpu[kstart:kend, ...])
            pros = pros.reshape(end - start, kend - kstart, 2)
            p_pros.append(pros)
        p_pros = torch.cat(p_pros, axis=1)
        pk_pros.append(p_pros)
    pros = torch.cat(pk_pros, axis=0)
    pairlabels = idlabel2pairlabel(q_ids, g_ids)
    acc = accuracy(pros.reshape(-1, 2), pairlabels)[0]
    distance_matrix = pros[:, :, 0].cpu().detach().numpy()
    q_ids = q_ids.cpu().detach().numpy()
    g_ids = g_ids.cpu().detach().numpy()
    q_cids = q_cids.cpu().detach().numpy()
    g_cids = g_cids.cpu().detach().numpy()

    print('Compute CMC and mAP')
    cmc, mAP = get_cmc_map(distance_matrix, q_ids, g_ids, q_cids, g_cids)
    val_end_time = time.time()
    time_spent = sec2min_sec(val_start_time, val_end_time)

    text = 'Finish testing epoch {:>3}, time spent: [{:>3}mins{:>3}s], performance:\n##'.format(
        epoch, time_spent[0], time_spent[1])
    text += 'W/O attention> |CMC1:{:>5.4f} |mAP:{:>5.4f} |ACC:{:>5.4f} '.format(
        cmc[0], mAP, acc)
    logger.info('testing', text)

    logger.add_scalar('TEST/cmc1', cmc[0], epoch)
    logger.add_scalar('TEST/cmc5', cmc[4], epoch)
    logger.add_scalar('TEST/mAP', mAP, epoch)
    logger.add_scalar('TEST/acc', acc, epoch)
    return cmc, mAP, acc
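
idlabel2pairlabel is not shown; a plausible implementation builds a flat same/different label vector for every (query, gallery) pair, assuming class 1 in the classifier's output means "same identity":

def idlabel2pairlabel(ids_a, ids_b):
    # (na,) and (nb,) id tensors -> (na * nb,) pair labels in
    # row-major order: 1 where the two ids match, else 0.
    return (ids_a.unsqueeze(1) == ids_b.unsqueeze(0)).long().reshape(-1)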
Example #8
def train_one_epoch(model, bclassifier, train_loader, losses, optimizer,
                    scheduler, epoch):
    global batch_step

    epoch_start_time = time.time()
    logger.info(
        'training', 'Start training epoch-{}, lr={:.6}'.format(
            epoch, get_lr_from_optim(optimizer)))

    scaler = amp.GradScaler()
    model.train()
    bclassifier.train()
    history = collections.defaultdict(list)
    for i, (imgs, labels) in enumerate(train_loader):
        batch = i + 1
        batch_start_time = time.time()

        imgs, labels = imgs.cuda(), labels.cuda()
        pair_labels = idlabel2pairlabel(labels, labels)
        with amp.autocast():
            f_bn, p, f_mask = model(imgs)
            p_same = bclassifier(f_bn, f_bn)
            id_loss = losses['cross_entropy_loss'](p, labels)
            pair_loss = losses['pair_loss'](p_same, pair_labels)
            loss = Config.weight_ce * id_loss
            loss += Config.weight_pairloss * pair_loss

        scaler.scale(loss).backward()

        scaler.step(optimizer)
        scaler.update()
        optimizer.zero_grad()

        sortmask = torch.sort(f_mask, dim=1)[0]
        bottom20mean = sortmask[:, :20].mean()
        top20mean = sortmask[:, -20:].mean()

        acc = accuracy(p, labels)[0]
        acc_pair = accuracy(p_same, pair_labels)[0]
        batch_end_time = time.time()
        time_spent = batch_end_time - batch_start_time

        perform = {
            'id_loss': float(Config.weight_ce * id_loss),
            'pair_loss': float(Config.weight_pairloss * pair_loss),
            'acc_id': float(acc),
            'acc_pair': float(acc_pair),
            #    'mtop20': float(top20mean),
            #    'mbot20': float(bottom20mean),
            'time(s)': float(time_spent)
        }

        if i % Config.batch_per_log == 0:
            stage = (epoch, batch)
            text = ''
            for k, v in perform.items():
                text += '|{}:{:<8.4f} '.format(k, float(v))
            logger.info('training', text, stage=stage)

        for k, v in perform.items():
            history[k].append(float(v))
            if k != 'time(s)':
                logger.add_scalar('TRAIN_b/' + k, v, batch_step)
        batch_step += 1

    scheduler.step()

    epoch_end_time = time.time()
    time_spent = sec2min_sec(epoch_start_time, epoch_end_time)

    text = 'Finish training epoch {}, time spent: {}mins {}secs, performance:\n##'.format(
        epoch, time_spent[0], time_spent[1])
    for k, vlist in history.items():
        v = mean(vlist)
        text += '|{}:{:>5.4f} '.format(k, v)
        if k != 'time(s)':
            logger.add_scalar('TRAIN_e/' + k, v, epoch)
    logger.info('training', text)
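
Finally, the two small utilities these loops lean on can be sketched as follows; the real versions may differ in detail:

def get_lr_from_optim(optim):
    # Learning rate of the first parameter group.
    return optim.param_groups[0]['lr']

def sec2min_sec(start, end):
    # Elapsed wall-clock time as (whole minutes, remaining seconds).
    elapsed = int(end - start)
    return elapsed // 60, elapsed % 60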