Example #1
# Imports assumed by this snippet; SummaryWriter may come from tensorboardX
# or torch.utils.tensorboard, and Dataset, DataLoader, EDSR, get_args,
# get_root_logger, compute_psnr and update_tfboard are project-local helpers.
import os.path as osp
import time

import numpy as np
import tensorflow as tf
from tensorboardX import SummaryWriter
from tqdm import tqdm


def main():
    args = get_args()

    writer = SummaryWriter(args.work_dir)
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(args.work_dir, '{}.log'.format(timestamp))
    logger = get_root_logger(log_file)

    train_dataset = Dataset(dataset=args.train_dataset,
                            split='train',
                            crop_cfg=dict(type='random',
                                          patch_size=args.patch_size),
                            flip_and_rotate=True)
    val_dataset = Dataset(dataset=args.valid_dataset,
                          split='valid',
                          override_length=args.num_valids,
                          crop_cfg=None)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)

    model = EDSR(num_blocks=args.num_blocks, channels=args.num_channels)
    loss_fn = tf.keras.losses.MeanAbsoluteError()
    optimizer = tf.keras.optimizers.Adam(args.learning_rate)

    best_psnr = 0

    for epoch in range(1, args.num_epochs + 1):
        losses = []
        for lr, hr in tqdm(train_loader):
            lr = tf.constant(lr, dtype=tf.float32)
            hr = tf.constant(hr, dtype=tf.float32)
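            # record the forward pass on the tape so the loss can be
            # differentiated w.r.t. the model's trainable variables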
            with tf.GradientTape() as tape:
                sr = model(lr)
                loss = loss_fn(hr, sr)
            gradients = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients,
                                          model.trainable_variables))

            losses.append(loss.numpy())
        mean_loss = np.mean(losses)
        logger.info(f'Epoch {epoch} - loss: {mean_loss:.4f}')
        writer.add_scalar('loss', mean_loss, epoch)

        # eval
        if epoch % args.eval_freq == 0 or epoch == args.num_epochs:
            logger.info('Evaluating...')
            psnrs = []
            for i, (lr, hr) in enumerate(val_loader):
                lr = tf.constant(lr, dtype=tf.float32)
                hr = tf.constant(hr, dtype=tf.float32)
                sr = model(lr)
                cur_psnr = compute_psnr(sr, hr)
                psnrs.append(cur_psnr)
                update_tfboard(writer, i, lr, hr, sr, epoch)
            psnr = np.mean(psnrs)
            if psnr > best_psnr:
                best_psnr = psnr
            model.save_weights(osp.join(args.work_dir, f'epoch_{epoch}'))
            logger.info('psnr: {:.2f} (best={:.2f})'.format(psnr, best_psnr))
            writer.add_scalar('psnr', psnr, epoch)
            writer.flush()
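
The loop above calls a project-local compute_psnr helper that the snippet does
not show. A minimal sketch of such a helper, assuming sr and hr are float
tensors in the [0, 255] range with shape [N, H, W, C], could be built on
tf.image.psnr:

import tensorflow as tf

def compute_psnr(sr, hr, max_val=255.0):
    """Mean PSNR over a batch of [N, H, W, C] float image tensors."""
    # tf.image.psnr returns one PSNR value per image in the batch
    per_image = tf.image.psnr(sr, hr, max_val=max_val)
    return float(tf.reduce_mean(per_image).numpy())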
Example #2
import json
import multiprocessing
import os
import pickle

import cv2
import numpy as np
import shapely.geometry as shgeo
from utils import json_load, get_root_logger, json_dump

logger = get_root_logger()


def divide(process_id, files, cfg):
    output_dir = cfg.output_dir
    thresh = cfg.thresh
    size = cfg.size
    overlap_size_cfg = cfg.overlap_size
    prefix = cfg.prefix
    min_box_size = cfg.min_box_size
    padding = cfg.padding
    assert size > overlap_size_cfg

    result = {'train': [], 'val': []}

    for k, v in files.items():
        id_ = v['id']
        logger.info(f'process: {process_id}, id: {id_}, path: {k}')
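
The snippet breaks off inside divide. Its config fields (size, overlap_size,
padding, thresh) suggest it tiles large images into overlapping patches and
clips boxes with shapely. As an illustration of the tiling step only (not the
author's actual continuation), window start offsets with a given overlap are
typically enumerated like this:

def window_origins(length, size, overlap):
    """Start offsets of `size`-wide windows covering `length` pixels."""
    stride = size - overlap
    starts = list(range(0, max(length - size, 0) + 1, stride))
    # add a final window flush against the border if coverage falls short
    if starts[-1] + size < length:
        starts.append(length - size)
    return starts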
Example #3
# Imports assumed by this snippet; UIDataset, collate_fn, build_model,
# cal_parameters, Optimizer, ModelSaver, configure_process, train and
# get_root_logger are project-local helpers, and `args` is a module-level
# namespace produced by the script's argument parser (not shown; see the
# sketch after this example).
import torch
from torch.utils.data import DataLoader


def main():
    logger = get_root_logger(args.log_path, mode='a')
    logger.info('Command Line Arguments:')
    for key, value in vars(args).items():
        logger.info(key + ' = ' + str(value))
    logger.info('End Command Line Arguments')

    batch_size = args.batch_size
    num_epochs = args.num_epochs

    resume_from = args.resume_from
    steps_per_checkpoint = args.steps_per_checkpoint

    gpu_id = args.gpu_id

    configure_process(args, gpu_id)
    if gpu_id > -1:
        logger.info('Using CUDA on GPU ' + str(gpu_id))
        args.cuda = True
    else:
        logger.info('Using CPU')
        args.cuda = False

    # Load data
    logger.info('Data base dir ' + args.data_base_dir)
    logger.info('Loading vocab from ' + args.vocab_file)
    with open(args.vocab_file, "r", encoding='utf-8') as f:
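        # +4 presumably reserves ids for special tokens (e.g. pad/unk/start/end)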
        args.target_vocab_size = len(f.readlines()) + 4
    logger.info('Load training data from ' + args.data_path)
    train_data = UIDataset(args.data_base_dir, args.data_path, args.label_path, args.vocab_file)
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True,
                              num_workers=2, drop_last=True, collate_fn=collate_fn)

    logger.info('Load validation data from ' + args.val_data_path)
    val_data = UIDataset(args.data_base_dir, args.val_data_path, args.label_path, args.vocab_file)
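    # note: the validation loader shuffles and drops the last partial batch,
    # mirroring the training loader above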
    val_loader = DataLoader(val_data, batch_size=batch_size, shuffle=True,
                            num_workers=2, drop_last=True, collate_fn=collate_fn)

    # Build model
    logger.info('Building model')
    if args.resume_from:
        logger.info('Loading checkpoint from %s' % resume_from)
        checkpoint = torch.load(resume_from)
    else:
        checkpoint = None
        logger.info('Creating model with fresh parameters')
    model = build_model(args, gpu_id, checkpoint)
    logger.info(model)

    n_params, enc, dec = cal_parameters(model)
    logger.info('encoder: %d' % enc)
    logger.info('decoder: %d' % dec)
    logger.info('number of parameters: %d' % n_params)

    # Build optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
    optim = Optimizer(optimizer)
    if checkpoint:
        optim.load_state_dict(checkpoint['optim'])
        optim.training_step += 1

    # Build model saver
    model_saver = ModelSaver(args.model_dir, model, optim)

    train(model, optim, model_saver, num_epochs, train_loader, val_loader, steps_per_checkpoint,
          args.valid_steps, args.lr_decay, args.start_decay_at, args.cuda)
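
This script reads a module-level args namespace that is not shown. A minimal
sketch of the argument parser it implies; flag names are taken from the
attributes used above, while the defaults are illustrative assumptions, not
the project's values:

import argparse

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--log_path', default='train.log')
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--num_epochs', type=int, default=100)
    parser.add_argument('--learning_rate', type=float, default=0.1)
    parser.add_argument('--gpu_id', type=int, default=-1)
    parser.add_argument('--resume_from', default='')
    parser.add_argument('--steps_per_checkpoint', type=int, default=100)
    parser.add_argument('--valid_steps', type=int, default=1000)
    parser.add_argument('--lr_decay', type=float, default=0.5)
    parser.add_argument('--start_decay_at', type=int, default=5)
    parser.add_argument('--data_base_dir', default='data')
    parser.add_argument('--vocab_file', default='vocab.txt')
    parser.add_argument('--data_path', default='train.lst')
    parser.add_argument('--label_path', default='labels.lst')
    parser.add_argument('--val_data_path', default='val.lst')
    parser.add_argument('--model_dir', default='checkpoints')
    return parser.parse_args()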