Beispiel #1
0
def job(tuning, params_path, devices, resume, save_interval):
    """Train a LandmarkNet classifier on the 'S'-scale data and evaluate it
    on the roxford5k / rparis6k retrieval benchmarks.

    Args:
        tuning: when True, hyper-parameters are loaded from ``params_path``
            and the evaluation result is appended to ``tuning/results.csv``.
        params_path: path to a JSON file holding the (tuning) parameters.
        devices: comma-separated GPU ids, assigned to CUDA_VISIBLE_DEVICES.
        resume: unused in this variant; kept for interface compatibility.
        save_interval: a checkpoint is written every ``save_interval`` epochs
            (and always after the final epoch).
    """
    global params
    if tuning:
        with open(params_path, 'r') as f:
            params = json.load(f)
        mode_str = 'tuning'
        setting = '_'.join(f'{tp}-{params[tp]}'
                           for tp in params['tuning_params'])
    else:
        mode_str = 'train'
        setting = ''

    # Derive the seed from the parameter values so that each tuning setting
    # trains with a different seed (hoping for a seed-averaging effect).
    seed = sum(ord(c) for c in str(params.values()))
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False

    exp_path = ROOT + f'experiments/{params["ex_name"]}/'
    os.environ['CUDA_VISIBLE_DEVICES'] = devices

    logger, writer = utils.get_logger(
        log_dir=exp_path + f'{mode_str}/log/{setting}',
        tensorboard_dir=exp_path + f'{mode_str}/tf_board/{setting}')

    # Preset augmentation strengths.  NOTE(review): the 'soft' preset does not
    # set 'shear_limit' / 'contrast_limit', yet both are read below; they must
    # already be present in the loaded params -- TODO confirm.
    if params['augmentation'] == 'soft':
        params['scale_limit'] = 0.2
        params['brightness_limit'] = 0.1
    elif params['augmentation'] == 'middle':
        params['scale_limit'] = 0.3
        params['shear_limit'] = 4
        params['brightness_limit'] = 0.1
        params['contrast_limit'] = 0.1
    else:
        raise ValueError(
            f'unsupported augmentation: {params["augmentation"]!r}')

    train_transform, eval_transform = data_utils.build_transforms(
        scale_limit=params['scale_limit'],
        shear_limit=params['shear_limit'],
        brightness_limit=params['brightness_limit'],
        contrast_limit=params['contrast_limit'],
    )

    data_loaders = data_utils.make_train_loaders(
        params=params,
        data_root=ROOT + 'input/' + params['data'],
        train_transform=train_transform,
        eval_transform=eval_transform,
        scale='S',
        test_size=0,
        class_topk=params['class_topk'],
        num_workers=8)

    model = models.LandmarkNet(
        n_classes=params['class_topk'],
        model_name=params['model_name'],
        pooling=params['pooling'],
        loss_module=params['loss'],
        s=params['s'],
        margin=params['margin'],
        theta_zero=params['theta_zero'],
        use_fc=params['use_fc'],
        fc_dim=params['fc_dim'],
    ).cuda()
    optimizer = utils.get_optim(params, model)
    criterion = nn.CrossEntropyLoss()
    # The scheduler is stepped once per batch, so T_max spans every iteration.
    scheduler = optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=params['epochs'] * len(data_loaders['train']),
        eta_min=3e-6)
    start_epoch = 0

    if len(devices.split(',')) > 1:
        model = nn.DataParallel(model)

    for epoch in range(start_epoch, params['epochs']):

        logger.info(
            f'Epoch {epoch}/{params["epochs"]} | lr: {optimizer.param_groups[0]["lr"]}'
        )

        # ============================== train ============================== #
        model.train(True)

        losses = utils.AverageMeter()
        prec1 = utils.AverageMeter()

        for i, (_, x, y) in tqdm(enumerate(data_loaders['train']),
                                 total=len(data_loaders['train']),
                                 miniters=None,
                                 ncols=55):
            x = x.to('cuda')
            y = y.to('cuda')

            outputs = model(x, y)
            loss = criterion(outputs, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()

            acc = metrics.accuracy(outputs, y)
            losses.update(loss.item(), x.size(0))
            prec1.update(acc, x.size(0))

            if i % 100 == 99:
                logger.info(
                    f'{epoch+i/len(data_loaders["train"]):.2f}epoch | {setting} acc: {prec1.avg}'
                )

        train_loss = losses.avg
        train_acc = prec1.avg

        writer.add_scalars('Loss', {'train': train_loss}, epoch)
        writer.add_scalars('Acc', {'train': train_acc}, epoch)
        writer.add_scalar('LR', optimizer.param_groups[0]['lr'], epoch)

        if (epoch + 1) == params['epochs'] or (epoch + 1) % save_interval == 0:
            output_file_name = exp_path + f'ep{epoch}_' + setting + '.pth'
            utils.save_checkpoint(path=output_file_name,
                                  model=model,
                                  epoch=epoch,
                                  optimizer=optimizer,
                                  params=params)

    # Unwrap DataParallel only when it was actually applied; single-GPU runs
    # would otherwise crash with AttributeError on ``.module``.
    if isinstance(model, nn.DataParallel):
        model = model.module
    datasets = ('roxford5k', 'rparis6k')
    results = eval_datasets(model,
                            datasets=datasets,
                            ms=False,
                            tta_gem_p=1.0,
                            logger=logger)

    if tuning:
        tuning_result = {}
        for d in datasets:
            # Unpack once per dataset; only the mAP values are recorded.
            mapE, mapM, mapH, mpE, mpM, mpH, kappas = results[d]
            for key, value in zip(('mapE', 'mapM', 'mapH'),
                                  (mapE, mapM, mapH)):
                tuning_result[d + '-' + key] = [value]
        utils.write_tuning_result(params, tuning_result,
                                  exp_path + 'tuning/results.csv')
def job(tuning, params_path, devices, resume, save_interval):
    """Fine-tune LandmarkNet from a v1-only base checkpoint, then evaluate.

    Two stages share this entry point: when ``resume`` is None the first
    stage starts from the base checkpoint (final layer removed) on the 'SS2'
    data; when ``resume`` is a checkpoint path, the scale-up stage continues
    on the verified 'M2' data with batch-norm layers frozen.

    Args:
        tuning: when True, hyper-parameters are loaded from ``params_path``
            and the evaluation result is appended to ``tuning/results.csv``.
        params_path: path to a JSON file holding the (tuning) parameters.
        devices: comma-separated GPU ids, assigned to CUDA_VISIBLE_DEVICES.
        resume: checkpoint path to continue from, or None to start fresh.
        save_interval: a checkpoint is written every ``save_interval`` epochs
            (and always after the final epoch).
    """
    global params
    if tuning:
        with open(params_path, 'r') as f:
            params = json.load(f)
        mode_str = 'tuning'
        setting = '_'.join(f'{tp}-{params[tp]}'
                           for tp in params['tuning_params'])
    else:
        mode_str = 'train'
        setting = ''

    # NOTE(review): exp_path is derived from the current params, but params is
    # replaced below when resuming -- TODO confirm ex_name never changes
    # across a resume.
    exp_path = ROOT + f'experiments/{params["ex_name"]}/'
    os.environ['CUDA_VISIBLE_DEVICES'] = devices

    if resume is None:
        # Keeps the paths consistent between the C-AIR and ABCI environments.
        params[
            'base_ckpt_path'] = f'experiments/v1only/ep4_augmentation-soft_epochs-5_loss-{params["loss"]}.pth'
        params[
            'clean_path'] = ROOT + f'input/clean/train19_cleaned_verifythresh{params["verifythresh"]}_freqthresh{params["freqthresh"]}.csv'
    else:
        params = utils.load_checkpoint(path=resume, params=True)['params']

    logger, writer = utils.get_logger(
        log_dir=exp_path + f'{mode_str}/log/{setting}',
        tensorboard_dir=exp_path + f'{mode_str}/tf_board/{setting}')

    # Preset augmentation strengths.  NOTE(review): the 'soft' preset does not
    # set 'shear_limit' / 'contrast_limit', yet both are read below; they must
    # already be present in the loaded params -- TODO confirm.
    if params['augmentation'] == 'soft':
        params['scale_limit'] = 0.2
        params['brightness_limit'] = 0.1
    elif params['augmentation'] == 'middle':
        params['scale_limit'] = 0.3
        params['shear_limit'] = 4
        params['brightness_limit'] = 0.1
        params['contrast_limit'] = 0.1
    else:
        raise ValueError(
            f'unsupported augmentation: {params["augmentation"]!r}')

    train_transform, eval_transform = data_utils.build_transforms(
        scale_limit=params['scale_limit'],
        shear_limit=params['shear_limit'],
        brightness_limit=params['brightness_limit'],
        contrast_limit=params['contrast_limit'],
    )

    data_loaders = data_utils.make_train_loaders(
        params=params,
        data_root=ROOT + 'input/' + params['data'],
        train_transform=train_transform,
        eval_transform=eval_transform,
        scale='SS2',
        test_size=0,
        class_topk=params['class_topk'],
        num_workers=8)

    model = models.LandmarkNet(
        n_classes=params['class_topk'],
        model_name=params['model_name'],
        pooling=params['pooling'],
        loss_module=params['loss'],
        s=params['s'],
        margin=params['margin'],
        theta_zero=params['theta_zero'],
        use_fc=params['use_fc'],
        fc_dim=params['fc_dim'],
    ).cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = utils.get_optim(params, model)

    if resume is None:
        # Load the base weights but drop the final (classification) layer,
        # whose shape depends on the loss module in use.
        sdict = torch.load(ROOT + params['base_ckpt_path'])['state_dict']
        if params['loss'] == 'adacos':
            del sdict['final.W']  # remove fully-connected layer
        elif params['loss'] == 'softmax':
            del sdict['final.weight'], sdict[
                'final.bias']  # remove fully-connected layer
        else:
            del sdict['final.weight']  # remove fully-connected layer
        model.load_state_dict(sdict, strict=False)

        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=params['epochs'] * len(data_loaders['train']),
            eta_min=3e-6)
        start_epoch, end_epoch = (0,
                                  params['epochs'] - params['scaleup_epochs'])
    else:
        ckpt = utils.load_checkpoint(path=resume,
                                     model=model,
                                     optimizer=optimizer,
                                     epoch=True)
        model, optimizer, start_epoch = ckpt['model'], ckpt[
            'optimizer'], ckpt['epoch'] + 1
        end_epoch = params['epochs']

        # Resume cosine annealing from where the previous run stopped.
        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=params['epochs'] * len(data_loaders['train']),
            eta_min=3e-6,
            last_epoch=start_epoch * len(data_loaders['train']))

        setting += 'scaleup_' + resume.split('/')[-1].replace('.pth', '')

        # Scale-up stage: switch to the verified 'M2' data and freeze BN.
        data_loaders = data_utils.make_verified_train_loaders(
            params=params,
            data_root=ROOT + 'input/' + params['data'],
            train_transform=train_transform,
            eval_transform=eval_transform,
            scale='M2',
            test_size=0,
            num_workers=8)
        batch_norm.freeze_bn(model)

    if len(devices.split(',')) > 1:
        model = nn.DataParallel(model)

    for epoch in range(start_epoch, end_epoch):
        logger.info(f'Epoch {epoch}/{end_epoch}')

        # ============================== train ============================== #
        model.train(True)

        losses = utils.AverageMeter()
        prec1 = utils.AverageMeter()

        for i, (_, x, y) in tqdm(enumerate(data_loaders['train']),
                                 total=len(data_loaders['train']),
                                 miniters=None,
                                 ncols=55):
            x = x.to('cuda')
            y = y.to('cuda')

            outputs = model(x, y)
            loss = criterion(outputs, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()

            acc = metrics.accuracy(outputs, y)
            losses.update(loss.item(), x.size(0))
            prec1.update(acc, x.size(0))

            if i % 100 == 99:
                logger.info(
                    f'{epoch+i/len(data_loaders["train"]):.2f}epoch | {setting} acc: {prec1.avg}'
                )

        train_loss = losses.avg
        train_acc = prec1.avg

        writer.add_scalars('Loss', {'train': train_loss}, epoch)
        writer.add_scalars('Acc', {'train': train_acc}, epoch)
        writer.add_scalar('LR', optimizer.param_groups[0]['lr'], epoch)

        if (epoch + 1) == end_epoch or (epoch + 1) % save_interval == 0:
            output_file_name = exp_path + f'ep{epoch}_' + setting + '.pth'
            utils.save_checkpoint(path=output_file_name,
                                  model=model,
                                  epoch=epoch,
                                  optimizer=optimizer,
                                  params=params)

    # Unwrap DataParallel only when it was actually applied; single-GPU runs
    # would otherwise crash with AttributeError on ``.module``.
    if isinstance(model, nn.DataParallel):
        model = model.module
    datasets = ('oxford5k', 'paris6k', 'roxford5k', 'rparis6k')
    results = eval_datasets(model,
                            datasets=datasets,
                            ms=True,
                            tta_gem_p=1.0,
                            logger=logger)

    if tuning:
        tuning_result = {}
        for d in datasets:
            if d in ('oxford5k', 'paris6k'):
                # These benchmarks report a single scalar; record it directly.
                tuning_result[d] = results[d]
            else:
                # Unpack once per dataset; only the mAP values are recorded.
                mapE, mapM, mapH, mpE, mpM, mpH, kappas = results[d]
                for key, value in zip(('mapE', 'mapM', 'mapH'),
                                      (mapE, mapM, mapH)):
                    tuning_result[d + '-' + key] = [value]
        utils.write_tuning_result(params, tuning_result,
                                  exp_path + 'tuning/results.csv')
def job(tuning, params_path, devices, resume, save_interval):
    """Train LandmarkNet on the cleaned 'S2' data and evaluate on the four
    Oxford/Paris retrieval benchmarks.

    Args:
        tuning: when True, hyper-parameters are loaded from ``params_path``
            and the evaluation result is appended to ``tuning/results.csv``.
        params_path: path to a JSON file holding the (tuning) parameters.
        devices: comma-separated GPU ids, assigned to CUDA_VISIBLE_DEVICES.
        resume: unused in this variant; kept for interface compatibility.
        save_interval: a checkpoint is written every ``save_interval`` epochs
            (and always after the final epoch).
    """
    global params
    if tuning:
        with open(params_path, 'r') as f:
            params = json.load(f)
        mode_str = 'tuning'
        setting = '_'.join(f'{tp}-{params[tp]}'
                           for tp in params['tuning_params'])
    else:
        mode_str = 'train'
        setting = ''

    exp_path = os.path.join(dataset_connector.result_dir,
                            f'{params["ex_name"]}/')
    os.environ['CUDA_VISIBLE_DEVICES'] = devices
    print("CUDA Available:", torch.cuda.is_available(),
          "CUDA_VISIBLE_DEVICES:", os.environ['CUDA_VISIBLE_DEVICES'])
    logger, writer = utils.get_logger(
        log_dir=exp_path + f'{mode_str}/log/{setting}',
        tensorboard_dir=exp_path + f'{mode_str}/tf_board/{setting}')

    # Preset augmentation strengths.  NOTE(review): the 'soft' preset does not
    # set 'shear_limit' / 'contrast_limit', yet both are read below; they must
    # already be present in the loaded params -- TODO confirm.
    if params['augmentation'] == 'soft':
        params['scale_limit'] = 0.2
        params['brightness_limit'] = 0.1
    elif params['augmentation'] == 'middle':
        params['scale_limit'] = 0.3
        params['shear_limit'] = 4
        params['brightness_limit'] = 0.1
        params['contrast_limit'] = 0.1
    else:
        raise ValueError(
            f'unsupported augmentation: {params["augmentation"]!r}')

    train_transform, eval_transform = data_utils.build_transforms(
        scale_limit=params['scale_limit'],
        shear_limit=params['shear_limit'],
        brightness_limit=params['brightness_limit'],
        contrast_limit=params['contrast_limit'],
    )

    # os.cpu_count() may return None (documented); fall back to 1 worker pair.
    data_loaders = data_utils.make_train_loaders(
        params=params,
        data_root=ROOT + 'input/' + params['train_data'],
        use_clean_version=True,
        train_transform=train_transform,
        eval_transform=eval_transform,
        scale='S2',
        test_size=0.1,
        num_workers=(os.cpu_count() or 1) * 2)

    model = models.LandmarkNet(
        n_classes=params['class_topk'],
        model_name=params['model_name'],
        pooling=params['pooling'],
        loss_module=params['loss'],
        s=params['s'],
        margin=params['margin'],
        theta_zero=params['theta_zero'],
        use_fc=params['use_fc'],
        fc_dim=params['fc_dim'],
    ).cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = utils.get_optim(params, model)

    # TODO: Missing initial weight file.
    # sdict = torch.load(ROOT + params['base_ckpt_path'])['state_dict']
    # del sdict['final.weight']  # remove fully-connected layer
    # model.load_state_dict(sdict, strict=False)

    # The scheduler is stepped once per batch, so T_max spans every iteration.
    scheduler = optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=params['epochs'] * len(data_loaders['train']),
        eta_min=3e-6)
    start_epoch, end_epoch = (0, params['epochs'] - params['scaleup_epochs'])

    if len(devices.split(',')) > 1:
        model = nn.DataParallel(model)

    for epoch in range(start_epoch, end_epoch):
        logger.info(f'Epoch {epoch}/{end_epoch}')

        # ============================== train ============================== #
        model.train(True)

        losses = utils.AverageMeter()
        prec1 = utils.AverageMeter()

        for i, (_, x, y) in tqdm(enumerate(data_loaders['train']),
                                 total=len(data_loaders['train']),
                                 miniters=None,
                                 ncols=55):
            x = x.to('cuda')
            y = y.to('cuda')

            outputs = model(x, y)
            loss = criterion(outputs, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()

            acc = metrics.accuracy(outputs, y)
            losses.update(loss.item(), x.size(0))
            prec1.update(acc, x.size(0))
            # NOTE(review): this logs every batch, which is very verbose;
            # consider folding it into the periodic log below.
            logger.info("Training Loss:{},Accuracy(Prec1):{}".format(
                loss.item(), acc))
            if i % 100 == 99:
                logger.info(
                    f'{epoch + i / len(data_loaders["train"]):.2f}epoch | {setting} acc: {prec1.avg}'
                )

        train_loss = losses.avg
        train_acc = prec1.avg

        writer.add_scalars('Loss', {'train': train_loss}, epoch)
        writer.add_scalars('Acc', {'train': train_acc}, epoch)
        writer.add_scalar('LR', optimizer.param_groups[0]['lr'], epoch)

        if (epoch + 1) == end_epoch or (epoch + 1) % save_interval == 0:
            output_file_name = exp_path + f'ep{epoch}_' + setting + '.pth'
            utils.save_checkpoint(path=output_file_name,
                                  model=model,
                                  epoch=epoch,
                                  optimizer=optimizer,
                                  params=params)

    # Unwrap DataParallel only when it was actually applied; single-GPU runs
    # would otherwise crash with AttributeError on ``.module``.
    if isinstance(model, nn.DataParallel):
        model = model.module
    datasets = ('oxford5k', 'paris6k', 'roxford5k', 'rparis6k')
    results = eval_datasets(model,
                            datasets=datasets,
                            ms=True,
                            tta_gem_p=1.0,
                            logger=logger)

    if tuning:
        tuning_result = {}
        for d in datasets:
            if d in ('oxford5k', 'paris6k'):
                # These benchmarks report a single scalar; record it directly.
                tuning_result[d] = results[d]
            else:
                # Unpack once per dataset; only the mAP values are recorded.
                mapE, mapM, mapH, mpE, mpM, mpH, kappas = results[d]
                for key, value in zip(('mapE', 'mapM', 'mapH'),
                                      (mapE, mapM, mapH)):
                    tuning_result[d + '-' + key] = [value]
        utils.write_tuning_result(params, tuning_result,
                                  exp_path + 'tuning/results.csv')
Beispiel #4
0
def job(tuning, params_path, devices, resume, save_interval):
    """Train a LandmarkFishNet classifier with a train/validation split,
    logging both losses and accuracies to TensorBoard.

    Args:
        tuning: when True, hyper-parameters are loaded from ``params_path``
            and the final metrics are appended to ``tuning/results.csv``.
        params_path: path to a JSON file holding the (tuning) parameters.
        devices: comma-separated GPU ids, assigned to CUDA_VISIBLE_DEVICES.
        resume: checkpoint path to restore model/optimizer from, or None.
        save_interval: if > 0, a checkpoint is written every
            ``save_interval`` epochs (and always after the final epoch).
    """
    global params
    if tuning:
        with open(params_path, 'r') as f:
            params = json.load(f)
        mode_str = 'tuning'
        setting = '_'.join(f'{tp}-{params[tp]}'
                           for tp in params['tuning_params'])
    else:
        mode_str = 'train'
        setting = ''

    exp_path = ROOT + f'experiments/{params["ex_name"]}/'
    os.environ['CUDA_VISIBLE_DEVICES'] = devices

    logger, writer = utils.get_logger(
        log_dir=exp_path + f'{mode_str}/log/{setting}',
        tensorboard_dir=exp_path + f'{mode_str}/tf_board/{setting}')
    train_transform, eval_transform = build_transforms(
        scale_range=params['scale_range'],
        brightness_range=params['brightness_range'])
    data_loaders = data_utils.make_train_loaders(
        params=params,
        data_root=ROOT + 'input/train2018',
        train_transform=train_transform,
        eval_transform=eval_transform,
        class_topk=params['class_topk'],
        num_workers=8)

    model = models.LandmarkFishNet(
        n_classes=params['class_topk'],
        model_name=params['model_name'],
        pooling_strings=params['pooling'].split(','),
        loss_module='arcface',
        s=30.0,
        margin=params['margin'],
        use_fc=params['use_fc'],
        fc_dim=params['fc_dim'],
    ).cuda()
    optimizer = utils.get_optim(params, model)
    criterion = nn.CrossEntropyLoss()
    # The scheduler is stepped once per batch, so T_max spans every iteration.
    scheduler = optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=params['epochs'] * len(data_loaders['train']),
        eta_min=1e-6)

    if len(devices.split(',')) > 1:
        model = nn.DataParallel(model)
    # NOTE(review): the checkpoint is loaded after the DataParallel wrap, so
    # a checkpoint saved without the wrapper may have mismatching key
    # prefixes -- TODO confirm utils.load_checkpoint handles this.
    if resume is not None:
        model, optimizer = utils.load_checkpoint(path=resume,
                                                 model=model,
                                                 optimizer=optimizer)

    for epoch in range(params['epochs']):
        logger.info(
            f'Epoch {epoch}/{params["epochs"]} | lr: {optimizer.param_groups[0]["lr"]}'
        )

        # ============================== train ============================== #
        model.train(True)

        losses = utils.AverageMeter()
        prec1 = utils.AverageMeter()

        for i, (_, x, y) in tqdm(enumerate(data_loaders['train']),
                                 total=len(data_loaders['train']),
                                 miniters=None,
                                 ncols=55):
            x = x.to('cuda')
            y = y.to('cuda')

            outputs = model(x, y)
            loss = criterion(outputs, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()

            acc = metrics.accuracy(outputs, y)
            losses.update(loss.item(), x.size(0))
            prec1.update(acc, x.size(0))

            if i % 100 == 99:
                logger.info(
                    f'{epoch+i/len(data_loaders["train"]):.2f}epoch | {setting} acc: {prec1.avg}'
                )

        train_loss = losses.avg
        train_acc = prec1.avg

        # ============================== validation ============================== #
        model.train(False)
        losses.reset()
        prec1.reset()

        for i, (_, x, y) in tqdm(enumerate(data_loaders['val']),
                                 total=len(data_loaders['val']),
                                 miniters=None,
                                 ncols=55):
            x = x.to('cuda')
            y = y.to('cuda')

            with torch.no_grad():
                outputs = model(x, y)
                loss = criterion(outputs, y)

            acc = metrics.accuracy(outputs, y)
            losses.update(loss.item(), x.size(0))
            prec1.update(acc, x.size(0))

        val_loss = losses.avg
        val_acc = prec1.avg

        logger.info(f'[Val] Loss: \033[1m{val_loss:.4f}\033[0m | '
                    f'Acc: \033[1m{val_acc:.4f}\033[0m\n')

        writer.add_scalars('Loss', {'train': train_loss}, epoch)
        writer.add_scalars('Acc', {'train': train_acc}, epoch)
        writer.add_scalars('Loss', {'val': val_loss}, epoch)
        writer.add_scalars('Acc', {'val': val_acc}, epoch)
        writer.add_scalar('LR', optimizer.param_groups[0]['lr'], epoch)

        if save_interval > 0:
            if (epoch +
                    1) == params['epochs'] or (epoch + 1) % save_interval == 0:
                output_file_name = exp_path + f'ep{epoch}_' + setting + '.pth'
                utils.save_checkpoint(path=output_file_name,
                                      model=model,
                                      epoch=epoch,
                                      optimizer=optimizer,
                                      params=params)

    if tuning:
        # Record the last epoch's metrics explicitly (no eval() indirection).
        tuning_result = {
            'train_loss': [train_loss],
            'train_acc': [train_acc],
            'val_loss': [val_loss],
            'val_acc': [val_acc],
        }
        utils.write_tuning_result(params, tuning_result,
                                  exp_path + 'tuning/results.csv')
Beispiel #5
0
def job(tuning, params_path, devices, resume):
    """Train a UNet segmentation model with a train/validation split.

    Example:
        python exp0.py job --devices 0,1 -s
        python exp0.py tuning --devices 0,1 --n-gpu 1 --mode 'random' --n-iter 4

    Args:
        tuning: when True, hyper-parameters are loaded from ``params_path``
            and the final metrics are appended to ``tuning/results.csv``.
        params_path: path to a JSON file holding the (tuning) parameters.
        devices: comma-separated GPU ids, assigned to CUDA_VISIBLE_DEVICES.
        resume: checkpoint path to restore model/optimizer from, or None.
    """
    # The ``global`` declaration must come before any use of ``params`` in
    # this scope -- the original referenced params (for exp_path) first,
    # which raises "SyntaxError: name 'params' is used prior to global
    # declaration".  exp_path is therefore computed after params is set.
    global params
    if tuning:
        with open(params_path, 'r') as f:
            params = json.load(f)
        mode_str = 'tuning'
        setting = '_'.join(f'{tp}-{params[tp]}'
                           for tp in params['tuning_params'])
    else:
        mode_str = 'train'
        setting = ''

    exp_path = ROOT + f'experiments/{params["ex_name"]}/'
    os.environ['CUDA_VISIBLE_DEVICES'] = devices

    logger, writer = utils.get_logger(
        log_dir=exp_path + f'{mode_str}/log/{setting}',
        tensorboard_dir=exp_path + f'{mode_str}/tf_board/{setting}')

    train_df = pd.read_csv(ROOT + 'data/train.csv')
    train_df, val_df = train_test_split(train_df,
                                        test_size=1024,
                                        random_state=params['seed'])

    model = models.UNet(in_channels=3,
                        n_classes=2,
                        depth=4,
                        ch_first=32,
                        padding=True,
                        batch_norm=False,
                        up_mode='upconv').cuda()

    optimizer = utils.get_optim(model, params)

    if resume is not None:
        model, optimizer = utils.load_checkpoint(model,
                                                 resume,
                                                 optimizer=optimizer)

    if len(devices.split(',')) > 1:
        model = nn.DataParallel(model)

    data_transforms = {
        'train':
        transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ]),
        'val':
        transforms.Compose([
            transforms.ToPILImage(),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ]),
    }
    image_datasets = {
        'train': data_utils.CSVDataset(train_df, data_transforms['train']),
        'val': data_utils.CSVDataset(val_df, data_transforms['val'])
    }
    data_loaders = {
        'train':
        DataLoader(image_datasets['train'],
                   batch_size=params['batch_size'],
                   pin_memory=True,
                   shuffle=True,
                   drop_last=True,
                   num_workers=params['workers']),
        'val':
        DataLoader(image_datasets['val'],
                   batch_size=params['test_batch_size'],
                   pin_memory=True,
                   shuffle=False,
                   num_workers=params['workers'])
    }

    criterion = nn.CrossEntropyLoss()
    # Step-decay at 70% and 90% of training; stepped once per epoch below.
    scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[int(params['epochs'] * 0.7),
                    int(params['epochs'] * 0.9)],
        gamma=0.1)

    for epoch in range(params['epochs']):
        logger.info(
            f'Epoch {epoch}/{params["epochs"]} | lr: {optimizer.param_groups[0]["lr"]}'
        )

        # ============================== train ============================== #
        model.train(True)

        losses = utils.AverageMeter()
        prec1 = utils.AverageMeter()

        for i, (x, y) in tqdm(enumerate(data_loaders['train']),
                              total=len(data_loaders['train']),
                              miniters=50):
            x = x.to('cuda:0')
            y = y.to('cuda:0', non_blocking=True)

            outputs = model(x)
            loss = criterion(outputs, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            acc = utils.accuracy(outputs, y)
            losses.update(loss.item(), x.size(0))
            prec1.update(acc.item(), x.size(0))

        train_loss = losses.avg
        train_acc = prec1.avg

        # ============================== validation ============================== #
        model.train(False)
        losses.reset()
        prec1.reset()

        for i, (x, y) in tqdm(enumerate(data_loaders['val']),
                              total=len(data_loaders['val'])):
            x = x.cuda()
            y = y.cuda(non_blocking=True)

            with torch.no_grad():
                outputs = model(x)
                loss = criterion(outputs, y)

            acc = utils.accuracy(outputs, y)
            losses.update(loss.item(), x.size(0))
            prec1.update(acc.item(), x.size(0))

        val_loss = losses.avg
        val_acc = prec1.avg

        logger.info(f'[Val] Loss: \033[1m{val_loss:.4f}\033[0m | '
                    f'Acc: \033[1m{val_acc:.4f}\033[0m\n')

        writer.add_scalars('Loss', {'train': train_loss}, epoch)
        writer.add_scalars('Acc', {'train': train_acc}, epoch)
        writer.add_scalars('Loss', {'val': val_loss}, epoch)
        writer.add_scalars('Acc', {'val': val_acc}, epoch)
        writer.add_scalar('LR', optimizer.param_groups[0]['lr'], epoch)

        scheduler.step()

        if not tuning:
            utils.save_checkpoint(model, epoch, exp_path + 'model_optim.pth',
                                  optimizer)

    if tuning:
        # Record the last epoch's metrics explicitly (no eval() indirection).
        tuning_result = {
            'train_loss': [train_loss],
            'train_acc': [train_acc],
            'val_loss': [val_loss],
            'val_acc': [val_acc],
        }
        utils.write_tuning_result(params, tuning_result,
                                  exp_path + 'tuning/results.csv')