Example #1
0
def open_file(value, parent):
    """Open *value* with the command associated with its file extension.

    The extension-to-command map is read from the 'client.filetype'
    option (stored either as a dict or as its string repr).  When no
    command is known for the extension, the user is asked via
    file_selection() and the choice is persisted back into the options.
    The command is launched through a double fork so the grandchild is
    reparented to init (no zombies) and the caller never blocks on it.

    :param value: path of the file to open
    :param parent: parent window passed to the file-selection dialog
    """
    filetype = {}
    if options['client.filetype']:
        if isinstance(options['client.filetype'], str):
            # NOTE(review): eval() on a stored option string is unsafe if the
            # options file can be tampered with — consider ast.literal_eval,
            # which accepts dict literals but no arbitrary expressions.
            filetype = eval(options['client.filetype'])
        else:
            filetype = options['client.filetype']
    root, ext = os.path.splitext(value)
    cmd = False
    if ext[1:] in filetype:
        # Known extension: substitute the file path into the stored template.
        cmd = filetype[ext[1:]] % (value)
    if not cmd:
        cmd = file_selection(_('Open with...'),
                parent=parent)
        if cmd:
            # Remember the chosen command for this extension and persist it.
            cmd = cmd + ' %s'
            filetype[ext[1:]] = cmd
            options['client.filetype'] = filetype
            options.save()
            cmd = cmd % (value)
    if cmd:
        pid = os.fork()
        if not pid:
            # First child forks again so the grandchild is orphaned and
            # reaped by init rather than becoming a zombie of the GUI.
            pid = os.fork()
            if not pid:
                # partition() is safe even when cmd contains no space
                # (split(' ', 1) would raise ValueError in that case).
                prog, _sep, rest = cmd.partition(' ')
                args = [os.path.basename(prog)]
                if rest:
                    args.extend(rest.split(' '))
                try:
                    os.execvp(prog, args)
                except OSError:
                    # exec failed; fall through so this process exits below
                    pass
            time.sleep(0.1)
            sys.exit(0)
        os.waitpid(pid, 0)
Example #2
0
def open_file(value, parent):
    """Open the file *value* with a command chosen by file extension.

    The extension-to-command map is read from the 'client.filetype'
    option (stored either as a dict or as its string repr); when no
    command is known the user is prompted and the choice is persisted.
    The command is launched via a double fork so the grandchild is
    reparented to init and the caller never blocks on it.

    :param value: path of the file to open
    :param parent: parent window for the file-selection dialog
    """
    filetype = {}
    if options['client.filetype']:
        if isinstance(options['client.filetype'], str):
            # NOTE(review): eval() on a stored option string is unsafe if
            # the options file can be tampered with — consider
            # ast.literal_eval instead.
            filetype = eval(options['client.filetype'])
        else:
            filetype = options['client.filetype']
    root, ext = os.path.splitext(value)
    cmd = False
    if ext[1:] in filetype:
        # Known extension: substitute the file path into the template.
        cmd = filetype[ext[1:]] % (value)
    if not cmd:
        cmd = file_selection(_('Open with...'),
                parent=parent)
        if cmd:
            # Remember the chosen command for this extension and persist it.
            cmd = cmd + ' %s'
            filetype[ext[1:]] = cmd
            options['client.filetype'] = filetype
            options.save()
            cmd = cmd % (value)
    if cmd:
        pid = os.fork()
        if not pid:
            # First child forks again so the grandchild is orphaned and
            # reaped by init, avoiding zombies in the main process.
            pid = os.fork()
            if not pid:
                # NOTE(review): split(' ', 1) raises ValueError when cmd
                # contains no space — assumes the template always adds one.
                prog, args = cmd.split(' ', 1)
                args = [os.path.basename(prog)] + args.split(' ')
                try:
                    os.execvp(prog, args)
                except:
                    # exec failed; fall through so this process exits below
                    pass
            time.sleep(0.1)
            sys.exit(0)
        os.waitpid(pid, 0)
Example #3
0
def main():
    """Entry point: train a super-resolution model from a JSON option file.

    Parses the -opt argument, prepares the experiment directories, seeds
    the RNGs, builds the train/val dataloaders, then runs the training
    loop with periodic logging, checkpointing and Y-channel PSNR
    validation.
    """
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=True)

    # Create output directories; the pretrained-model keys are excluded
    # because they name files, not directories.
    util.mkdir_and_rename(
        opt['path']['experiments_root'])  # rename old experiments if exists
    util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root' and \
        not key == 'pretrain_model_G' and not key == 'pretrain_model_D'))
    option.save(opt)
    opt = option.dict_to_nonedict(
        opt)  # Convert to NoneDict, which return None for missing key.

    # print to file and std_out simultaneously
    sys.stdout = PrintLogger(opt['path']['log'])

    # random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    print("Random Seed: ", seed)
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloader
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_size = int(
                math.ceil(len(train_set) / dataset_opt['batch_size']))
            print('Number of train images: {:,d}, iters: {:,d}'.format(
                len(train_set), train_size))
            total_iters = int(opt['train']['niter'])
            total_epoches = int(math.ceil(total_iters / train_size))
            print('Total epoches needed: {:d} for iters {:,d}'.format(
                total_epoches, total_iters))
            train_loader = create_dataloader(train_set, dataset_opt)
        elif phase == 'val':
            val_dataset_opt = dataset_opt
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('Number of val images in [{:s}]: {:d}'.format(
                dataset_opt['name'], len(val_set)))
        else:
            raise NotImplementedError(
                'Phase [{:s}] is not recognized.'.format(phase))
    # NOTE(review): if the options define no 'train' phase this raises
    # NameError (train_loader unbound) rather than AssertionError.
    assert train_loader is not None

    # Create model
    model = create_model(opt)
    # create logger
    logger = Logger(opt)

    current_step = 0
    start_time = time.time()
    print('---------- Start training -------------')
    for epoch in range(total_epoches):
        for i, train_data in enumerate(train_loader):
            current_step += 1
            if current_step > total_iters:
                # NOTE: only exits the inner loop; the remaining epochs
                # still iterate (and immediately break again).
                break

            # training
            model.feed_data(train_data)
            model.optimize_parameters(current_step)

            # per-iteration wall time since the previous iteration
            time_elapsed = time.time() - start_time
            start_time = time.time()

            # log
            if current_step % opt['logger']['print_freq'] == 0:
                logs = model.get_current_log()
                print_rlt = OrderedDict()
                print_rlt['model'] = opt['model']
                print_rlt['epoch'] = epoch
                print_rlt['iters'] = current_step
                print_rlt['time'] = time_elapsed
                for k, v in logs.items():
                    print_rlt[k] = v
                print_rlt['lr'] = model.get_current_learning_rate()
                logger.print_format_results('train', print_rlt)

            # save models
            if current_step % opt['logger']['save_checkpoint_freq'] == 0:
                print('Saving the model at the end of iter {:d}.'.format(
                    current_step))
                model.save(current_step)

            # validation
            if current_step % opt['train']['val_freq'] == 0:
                print('---------- validation -------------')
                start_time = time.time()

                avg_psnr = 0.0
                idx = 0
                for val_data in val_loader:
                    idx += 1
                    img_name = os.path.splitext(
                        os.path.basename(val_data['LR_path'][0]))[0]
                    img_dir = os.path.join(opt['path']['val_images'], img_name)
                    util.mkdir(img_dir)

                    model.feed_data(val_data)
                    model.test()

                    visuals = model.get_current_visuals()
                    sr_img = util.tensor2img(visuals['SR'])  # uint8
                    gt_img = util.tensor2img(visuals['HR'])  # uint8

                    # Save SR images for reference
                    save_img_path = os.path.join(img_dir, '{:s}_{:d}.png'.format(\
                        img_name, current_step))
                    util.save_img(sr_img, save_img_path)

                    # calculate PSNR
                    # Crop a border of crop_scale (or scale) pixels on every
                    # side before scoring; crop_size <= 0 disables cropping.
                    if opt['crop_scale'] is not None:
                        crop_size = opt['crop_scale']
                    else:
                        crop_size = opt['scale']
                    if crop_size <= 0:
                        cropped_sr_img = sr_img.copy()
                        cropped_gt_img = gt_img.copy()
                    else:
                        if len(gt_img.shape) < 3:
                            # grayscale image: no channel axis to slice
                            cropped_sr_img = sr_img[crop_size:-crop_size,
                                                    crop_size:-crop_size]
                            cropped_gt_img = gt_img[crop_size:-crop_size,
                                                    crop_size:-crop_size]
                        else:
                            cropped_sr_img = sr_img[crop_size:-crop_size,
                                                    crop_size:-crop_size, :]
                            cropped_gt_img = gt_img[crop_size:-crop_size,
                                                    crop_size:-crop_size, :]
                    #avg_psnr += util.psnr(cropped_sr_img, cropped_gt_img)
                    # PSNR is accumulated on the luma (Y) channel only.
                    cropped_sr_img_y = bgr2ycbcr(cropped_sr_img, only_y=True)
                    cropped_gt_img_y = bgr2ycbcr(cropped_gt_img, only_y=True)
                    avg_psnr += util.psnr(
                        cropped_sr_img_y,
                        cropped_gt_img_y)  ##########only y channel

                avg_psnr = avg_psnr / idx
                time_elapsed = time.time() - start_time
                # Save to log
                print_rlt = OrderedDict()
                print_rlt['model'] = opt['model']
                print_rlt['epoch'] = epoch
                print_rlt['iters'] = current_step
                print_rlt['time'] = time_elapsed
                print_rlt['psnr'] = avg_psnr
                logger.print_format_results('val', print_rlt)
                print('-----------------------------------')

            # update learning rate
            model.update_learning_rate()

    print('Saving the final model.')
    model.save('latest')
    print('End of training.')
Example #4
0
# ---------------------------------------------------------------------------
# Options: parse the -opt command-line argument and load the training
# configuration it points to.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-opt',
                    type=str,
                    required=True,
                    help='Path to options JSON file.')
args = parser.parse_args()
options_path = args.opt
opt = option.parse(options_path, is_train=True)

# Rename any previous experiment directory, then create every output
# directory except the root itself and the pretrained-model entries
# (those name files, not directories).
util.mkdir_and_rename(opt['path']['experiments_root'])
util.mkdirs(path for key, path in opt['path'].items()
            if key not in ('experiments_root', 'pretrain_model_G', 'pretrain_model_D'))

option.save(opt)  # save option file to the opt['path']['options']
# NoneDict returns None for missing keys instead of raising KeyError.
opt = option.dict_to_nonedict(opt)


# print to file and std_out simultaneously
class PrintLogger(object):
    def __init__(self):
        self.terminal = sys.stdout
        self.log = open(os.path.join(opt['path']['log'], 'print_log.txt'), "a")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
Example #5
0
File: common.py  Project: gisce/erpclient
def terp_survey():
    """Show the one-time OpenERP user survey dialog.

    Returns False immediately when the stored 'survey.position' already
    matches SURVEY_VERSION.  Otherwise runs the dialog in a loop: on OK
    the answers are uploaded (an email address is required when the
    e-book checkbox is set), the survey is marked as done and saved; on
    Cancel or window close the dialog is simply dismissed.  Returns True
    once the dialog has been shown.
    """
    if options['survey.position']==SURVEY_VERSION:
        return False

    def color_set(widget, name):
        # Paint the widget with the named colour and force black text so
        # the required/invalid state is visible on any theme.
        colour = widget.get_colormap().alloc_color(common.colors.get(name,'white'))
        widget.modify_bg(gtk.STATE_ACTIVE, colour)
        widget.modify_fg(gtk.STATE_NORMAL, gtk.gdk.color_parse("black"))
        widget.modify_base(gtk.STATE_NORMAL, colour)
        widget.modify_text(gtk.STATE_NORMAL, gtk.gdk.color_parse("black"))
        widget.modify_text(gtk.STATE_INSENSITIVE, gtk.gdk.color_parse("black"))

    widnames = ('country','role','industry','employee','hear','system','opensource')
    winglade = glade.XML(common.terp_path("dia_survey.glade"), "dia_survey", gettext.textdomain())
    win = winglade.get_widget('dia_survey')
    parent = service.LocalService('gui.main').window
    win.set_transient_for(parent)
    win.set_icon(OPENERP_ICON)
    for widname in widnames:
        wid = winglade.get_widget('combo_'+widname)
        wid.child.set_editable(False)

    email_widget = winglade.get_widget('entry_email')
    want_ebook_widget = winglade.get_widget('check_button_ebook')

    def toggled_cb(togglebutton, *args):
        # The email field becomes required when the e-book is requested.
        value = togglebutton.get_active()
        color_set(email_widget, ('normal', 'required')[value])

    want_ebook_widget.connect('toggled', toggled_cb)

    while True:
        res = win.run()
        if res == gtk.RESPONSE_OK:
            email = email_widget.get_text()
            want_ebook = want_ebook_widget.get_active()
            if want_ebook and not len(email):
                # e-book requested without an email: flag the field and loop
                color_set(email_widget, 'invalid')
            else:
                company = winglade.get_widget('entry_company').get_text()
                phone = winglade.get_widget('entry_phone').get_text()
                name = winglade.get_widget('entry_name').get_text()
                city = winglade.get_widget('entry_city').get_text()
                result = "\ncompany: "+str(company)
                result += "\nname: " + str(name)
                result += "\nphone: " + str(phone)
                result += "\ncity: " + str(city)
                for widname in widnames:
                    wid = winglade.get_widget('combo_'+widname)
                    result += "\n" + widname + ": " + wid.child.get_text()
                result += "\nplan_use: " + str(winglade.get_widget('check_use').get_active())
                result += "\nplan_sell: " + str(winglade.get_widget('check_sell').get_active())
                result += "\nwant_ebook: " + str(want_ebook)

                # free-form comment from the text view (renamed from
                # 'buffer', which shadowed the builtin)
                text_buffer = winglade.get_widget('textview_comment').get_buffer()
                iter_start = text_buffer.get_start_iter()
                iter_end = text_buffer.get_end_iter()
                result += "\nnote: " + text_buffer.get_text(iter_start, iter_end, False)
                upload_data(email, result, type='SURVEY '+str(SURVEY_VERSION))
                options['survey.position']=SURVEY_VERSION
                options.save()
                parent.present()
                win.destroy()
                common.message(_('Thank you for the feedback !\n\
Your comments have been sent to OpenERP.\n\
You should now start by creating a new database or\n\
connecting to an existing server through the "File" menu.'))
                break
        # Fixed condition: the original `res == gtk.RESPONSE_CANCEL or
        # gtk.RESPONSE_DELETE_EVENT` was always true because the second
        # operand is a truthy constant.
        elif res in (gtk.RESPONSE_CANCEL, gtk.RESPONSE_DELETE_EVENT):
            parent.present()
            win.destroy()
            common.message(_('Thank you for testing OpenERP !\n\
You should now start by creating a new database or\n\
connecting to an existing server through the "File" menu.'))
            break

    return True
def main():
    """Train a super-resolution model described by a JSON option file.

    Parses the -opt argument, prepares (or resumes) the experiment
    directories, seeds the RNGs, builds the train/val dataloaders and an
    SRModel solver, trains for NUM_EPOCH epochs with periodic PSNR/SSIM
    validation, visualization dumps and checkpointing, and finally
    writes per-epoch statistics to train_results.csv.
    """
    # os.environ['CUDA_VISIBLE_DEVICES']='1' # You can specify your GPU device here.
    parser = argparse.ArgumentParser(description='Train Super Resolution Models')
    parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)

    if opt['train']['resume'] is False:
        util.mkdir_and_rename(opt['path']['exp_root'])  # rename old experiments if exists
        # pretrained-model entries name files, not directories to create
        util.mkdirs((path for key, path in opt['path'].items() if not key == 'exp_root' and \
                     not key == 'pretrain_G' and not key == 'pretrain_D'))
        option.save(opt)
        opt = option.dict_to_nonedict(opt)  # Convert to NoneDict, which return None for missing key.
    else:
        opt = option.dict_to_nonedict(opt)
        if opt['train']['resume_path'] is None:
            # message wording fixed (was: "does not declarate")
            raise ValueError("The 'resume_path' is not declared")

    if opt['exec_debug']:
        # debug mode: short run on the dedicated debug datasets
        NUM_EPOCH = 100
        opt['datasets']['train']['dataroot_HR'] = opt['datasets']['train']['dataroot_HR_debug']
        opt['datasets']['train']['dataroot_LR'] = opt['datasets']['train']['dataroot_LR_debug']

    else:
        NUM_EPOCH = int(opt['train']['num_epochs'])

    # random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    print("Random Seed: ", seed)
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloader
    # Initialize to None so a missing 'train' phase raises the intended
    # ValueError below instead of NameError on an unbound name.
    train_loader = None
    val_loader = None
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_loader = create_dataloader(train_set, dataset_opt)
            print('Number of train images in [%s]: %d' % (dataset_opt['name'], len(train_set)))
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('Number of val images in [%s]: %d' % (dataset_opt['name'], len(val_set)))
        elif phase == 'test':
            pass
        else:
            raise NotImplementedError("Phase [%s] is not recognized." % phase)

    if train_loader is None:
        raise ValueError("The training data does not exist")

    if opt['mode'] == 'sr':
        solver = SRModel(opt)
    else:
        # fixed: was `assert '<message>'`, which never fails because a
        # non-empty string is always truthy
        raise ValueError('Invalid opt.mode [%s] for SRModel class!' % opt['mode'])

    solver.summary(train_set[0]['LR'].size())
    solver.net_init()
    print('[Start Training]')

    start_time = time.time()

    start_epoch = 1
    if opt['train']['resume']:
        start_epoch = solver.load()

    for epoch in range(start_epoch, NUM_EPOCH + 1):
        # Initialization
        solver.training_loss = 0.0
        epoch_loss_log = 0.0

        if opt['mode'] == 'sr' :
            training_results = {'batch_size': 0, 'training_loss': 0.0}
        else:
            pass    # TODO
        train_bar = tqdm(train_loader)

        # Train model (loop variable renamed from `iter`, which shadowed
        # the builtin)
        for batch_idx, batch in enumerate(train_bar):
            solver.feed_data(batch)
            iter_loss = solver.train_step()
            epoch_loss_log += iter_loss.item()
            batch_size = batch['LR'].size(0)
            training_results['batch_size'] += batch_size

            if opt['mode'] == 'sr':
                # losses are weighted by batch size and averaged at the end
                training_results['training_loss'] += iter_loss * batch_size
                train_bar.set_description(desc='[%d/%d] Loss: %.4f ' % (
                    epoch, NUM_EPOCH, iter_loss))
            else:
                pass    # TODO

        solver.last_epoch_loss = epoch_loss_log / (len(train_bar))

        train_bar.close()
        time_elapse = time.time() - start_time
        start_time = time.time()
        print('Train Loss: %.4f' % (training_results['training_loss'] / training_results['batch_size']))

        # validate
        val_results = {'batch_size': 0, 'val_loss': 0.0, 'psnr': 0.0, 'ssim': 0.0}

        if epoch % solver.val_step == 0 and epoch != 0:
            print('[Validating...]')
            start_time = time.time()
            solver.val_loss = 0.0

            vis_index = 1

            for batch_idx, batch in enumerate(val_loader):
                visuals_list = []

                solver.feed_data(batch)
                iter_loss = solver.test(opt['chop'])
                batch_size = batch['LR'].size(0)
                val_results['batch_size'] += batch_size

                visuals = solver.get_current_visual()   # float cpu tensor

                sr_img = np.transpose(util.quantize(visuals['SR'], opt['rgb_range']).numpy(), (1,2,0)).astype(np.uint8)
                gt_img = np.transpose(util.quantize(visuals['HR'], opt['rgb_range']).numpy(), (1,2,0)).astype(np.uint8)

                # calculate PSNR on border-cropped images (scale pixels
                # removed on every side, the usual SR convention)
                crop_size = opt['scale']
                cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]
                cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]

                val_results['val_loss'] += iter_loss * batch_size

                val_results['psnr'] += util.calc_psnr(cropped_sr_img, cropped_gt_img)
                val_results['ssim'] += util.calc_ssim(cropped_sr_img, cropped_gt_img)

                if opt['mode'] == 'srgan':
                    pass    # TODO

                # HR/SR side-by-side collage saved for visual inspection
                visuals_list.extend([util.quantize(visuals['HR'].squeeze(0), opt['rgb_range']),
                                     util.quantize(visuals['SR'].squeeze(0), opt['rgb_range'])])

                images = torch.stack(visuals_list)
                img = thutil.make_grid(images, nrow=2, padding=5)
                ndarr = img.byte().permute(1, 2, 0).numpy()
                misc.imsave(os.path.join(solver.vis_dir, 'epoch_%d_%d.png' % (epoch, vis_index)), ndarr)
                vis_index += 1

            avg_psnr = val_results['psnr']/val_results['batch_size']
            avg_ssim = val_results['ssim']/val_results['batch_size']
            print('Valid Loss: %.4f | Avg. PSNR: %.4f | Avg. SSIM: %.4f | Learning Rate: %f'%(val_results['val_loss']/val_results['batch_size'], avg_psnr, avg_ssim, solver.current_learning_rate()))

            # fixed: was `start_time - time.time()`, which is always negative
            time_elapse = time.time() - start_time

            #if epoch%solver.log_step == 0 and epoch != 0:
            # tensorboard visualization
            solver.training_loss = training_results['training_loss'] / training_results['batch_size']
            solver.val_loss = val_results['val_loss'] / val_results['batch_size']

            solver.tf_log(epoch)

            # statistics
            if opt['mode'] == 'sr' :
                solver.results['training_loss'].append(solver.training_loss.cpu().data.item())
                solver.results['val_loss'].append(solver.val_loss.cpu().data.item())
                solver.results['psnr'].append(avg_psnr)
                solver.results['ssim'].append(avg_ssim)
            else:
                pass    # TODO

            # track the best PSNR seen so far for checkpoint naming
            is_best = False
            if solver.best_prec < solver.results['psnr'][-1]:
                solver.best_prec = solver.results['psnr'][-1]
                is_best = True

            solver.save(epoch, is_best)

        # update lr
        solver.update_learning_rate(epoch)

    # Persist per-epoch statistics; assumes results cover exactly
    # NUM_EPOCH epochs (may mismatch after a resume — TODO confirm).
    data_frame = pd.DataFrame(
        data={'training_loss': solver.results['training_loss']
            , 'val_loss': solver.results['val_loss']
            , 'psnr': solver.results['psnr']
            , 'ssim': solver.results['ssim']
              },
        index=range(1, NUM_EPOCH+1)
    )
    data_frame.to_csv(os.path.join(solver.results_dir, 'train_results.csv'),
                      index_label='Epoch')
Example #7
0
def main():
    """Train a super-resolution model with a two-level "month/day" schedule.

    Each outer dataloader batch is treated as a "month": codes are
    generated for it once, then the batch is sliced into smaller "day"
    sub-batches that drive the individual optimization steps.  Logging,
    checkpointing and validation run on the day-step counter.
    """
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=True)

    # Create output directories; the pretrained-model keys are excluded
    # because they name files, not directories.
    util.mkdir_and_rename(
        opt['path']['experiments_root'])  # rename old experiments if exists
    util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root' and \
        not key == 'pretrain_model_G' and not key == 'pretrain_model_D'))
    option.save(opt)
    opt = option.dict_to_nonedict(
        opt)  # Convert to NoneDict, which return None for missing key.

    # print to file and std_out simultaneously
    sys.stdout = PrintLogger(opt['path']['log'])

    # random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    print("Random Seed: ", seed)
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloader
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_size = int(
                math.ceil(len(train_set) / dataset_opt['batch_size']))
            print('Number of train images: {:,d}, iters: {:,d}'.format(
                len(train_set), train_size))
            total_iters = int(opt['train']['niter'])
            total_epoches = int(math.ceil(total_iters / train_size))
            print('Total epoches needed: {:d} for iters {:,d}'.format(
                total_epoches, total_iters))
            train_loader = create_dataloader(train_set, dataset_opt)
            # month = one dataloader batch; day = a slice of it
            batch_size_per_month = dataset_opt['batch_size']
            batch_size_per_day = int(
                opt['datasets']['train']['batch_size_per_day'])
            num_month = int(opt['train']['num_month'])
            num_day = int(opt['train']['num_day'])
        elif phase == 'val':
            val_dataset_opt = dataset_opt
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('Number of val images in [{:s}]: {:d}'.format(
                dataset_opt['name'], len(val_set)))
        else:
            raise NotImplementedError(
                'Phase [{:s}] is not recognized.'.format(phase))
    # NOTE(review): raises NameError (not AssertionError) if the options
    # define no 'train' phase — train_loader would be unbound.
    assert train_loader is not None

    # Create model
    model = create_model(opt)
    # create logger
    logger = Logger(opt)

    current_step = 0
    start_time = time.time()
    print('---------- Start training -------------')
    # baseline validation before any training step
    validate(val_loader, opt, model, current_step, 0, logger)
    for epoch in range(num_month):
        for i, train_data in enumerate(train_loader):
            # generate the codes once per month-batch, reused by every day
            cur_month_code = get_code_for_data_two(model, train_data, opt)
            for j in range(num_day):
                current_step += 1
                # get the sliced data
                cur_day_batch_start_idx = (
                    j * batch_size_per_day) % batch_size_per_month
                cur_day_batch_end_idx = cur_day_batch_start_idx + batch_size_per_day
                if cur_day_batch_end_idx > batch_size_per_month:
                    # day slice wraps past the end of the month batch:
                    # stitch the tail and head index ranges together
                    cur_day_batch_idx = np.hstack(
                        (np.arange(cur_day_batch_start_idx,
                                   batch_size_per_month),
                         np.arange(cur_day_batch_end_idx -
                                   batch_size_per_month)))
                else:
                    cur_day_batch_idx = slice(cur_day_batch_start_idx,
                                              cur_day_batch_end_idx)

                cur_day_train_data = {
                    'LR': train_data['LR'][cur_day_batch_idx],
                    'HR': train_data['HR'][cur_day_batch_idx]
                }
                code = []
                for gen_code in cur_month_code:
                    code.append(gen_code[cur_day_batch_idx])

                # training
                model.feed_data(cur_day_train_data, code=code)
                model.optimize_parameters(current_step)

                # per-step wall time since the previous step
                time_elapsed = time.time() - start_time
                start_time = time.time()

                # log
                if current_step % opt['logger']['print_freq'] == 0:
                    logs = model.get_current_log()
                    print_rlt = OrderedDict()
                    print_rlt['model'] = opt['model']
                    print_rlt['epoch'] = epoch
                    print_rlt['iters'] = current_step
                    print_rlt['time'] = time_elapsed
                    for k, v in logs.items():
                        print_rlt[k] = v
                    print_rlt['lr'] = model.get_current_learning_rate()
                    logger.print_format_results('train', print_rlt)

                # save models
                if current_step % opt['logger']['save_checkpoint_freq'] == 0:
                    print('Saving the model at the end of iter {:d}.'.format(
                        current_step))
                    model.save(current_step)

                # validation
                if current_step % opt['train']['val_freq'] == 0:
                    validate(val_loader, opt, model, current_step, epoch,
                             logger)

                # update learning rate
                model.update_learning_rate()

    print('Saving the final model.')
    model.save('latest')
    print('End of training.')
def main():
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, required=True, help='Path to option JSON file.')
    parser.add_argument('-single_GPU', action='store_true',help='Utilize only one GPU')
    parser.add_argument('-chroma', action='store_true',help='Training the chroma-channels generator')
    if parser.parse_args().single_GPU:
        available_GPUs = util.Assign_GPU(maxMemory=0.66)
    else:
        # available_GPUs = util.Assign_GPU(max_GPUs=None,maxMemory=0.8,maxLoad=0.8)
        available_GPUs = util.Assign_GPU(max_GPUs=None)
    opt = option.parse(parser.parse_args().opt, is_train=True,batch_size_multiplier=len(available_GPUs),name='JPEG'+('_chroma' if parser.parse_args().chroma else ''))

    if not opt['train']['resume']:
        util.mkdir_and_rename(opt['path']['experiments_root'])  # Modify experiment name if exists
        util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root' and \
            not key == 'pretrained_model_G' and not key == 'pretrained_model_D'))
    option.save(opt)
    opt = option.dict_to_nonedict(opt)  # Convert to NoneDict, which return None for missing key.

    # print to file and std_out simultaneously
    sys.stdout = PrintLogger(opt['path']['log'])

    # random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    print("Random Seed: ", seed)
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloader
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            max_accumulation_steps = max([opt['train']['grad_accumulation_steps_G'], opt['train']['grad_accumulation_steps_D']])
            train_set = create_dataset(dataset_opt)
            train_size = int(math.ceil(len(train_set) / dataset_opt['batch_size']))
            print('Number of train images: {:,d}, iters: {:,d}'.format(len(train_set), train_size))
            total_iters = int(opt['train']['niter']*max_accumulation_steps)#-current_step
            total_epoches = int(math.ceil(total_iters / train_size))
            print('Total epoches needed: {:d} for iters {:,d}'.format(total_epoches, total_iters))
            train_loader = create_dataloader(train_set, dataset_opt)
        elif phase == 'val':
            val_dataset_opt = dataset_opt
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('Number of val images in [{:s}]: {:d}'.format(dataset_opt['name'], len(val_set)))
        else:
            raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))
    assert train_loader is not None
    DEBUG = False
    # Create model
    if DEBUG:
        from models.base_model import BaseModel
        model = BaseModel
        model.step = 0
    else:
        model = create_model(opt,max_accumulation_steps,chroma_mode=opt['name'][:len('JPEG/chroma')]=='JPEG/chroma')

    # create logger
    logger = Logger(opt)
    # Save validation set results as image collage:
    SAVE_IMAGE_COLLAGE = True
    start_time,start_time_gradient_step = time.time(),model.step // max_accumulation_steps
    save_GT_Uncomp = True
    lr_too_low = False
    print('---------- Start training -------------')
    last_saving_time = time.time()
    recently_saved_models = deque(maxlen=4)
    for epoch in range(int(math.floor(model.step / train_size)),total_epoches):
        for i, train_data in enumerate(train_loader):
            model.gradient_step_num = model.step // max_accumulation_steps
            not_within_batch = model.step % max_accumulation_steps == (max_accumulation_steps - 1)
            saving_step = ((time.time()-last_saving_time)>60*opt['logger']['save_checkpoint_freq']) and not_within_batch
            if saving_step:
                last_saving_time = time.time()

            # save models
            if lr_too_low or saving_step:
                model.save_log()
                recently_saved_models.append(model.save(model.gradient_step_num))
                if len(recently_saved_models)>3:
                    model_2_delete = recently_saved_models.popleft()
                    os.remove(model_2_delete)
                    if model.D_exists:
                        os.remove(model_2_delete.replace('_G.','_D.'))
                print('{}: Saving the model before iter {:d}.'.format(datetime.now().strftime('%H:%M:%S'),model.gradient_step_num))
                if lr_too_low:
                    break

            if model.step > total_iters:
                break

            # time_elapsed = time.time() - start_time
            # if not_within_batch:    start_time = time.time()
            # log
            if model.gradient_step_num % opt['logger']['print_freq'] == 0 and not_within_batch:
                logs = model.get_current_log()
                print_rlt = OrderedDict()
                print_rlt['model'] = opt['model']
                print_rlt['epoch'] = epoch
                print_rlt['iters'] = model.gradient_step_num
                # time_elapsed = time.time() - start_time
                print_rlt['time'] = (time.time() - start_time)/np.maximum(1,model.gradient_step_num-start_time_gradient_step)
                start_time, start_time_gradient_step = time.time(), model.gradient_step_num
                for k, v in logs.items():
                    print_rlt[k] = v
                print_rlt['lr'] = model.get_current_learning_rate()
                logger.print_format_results('train', print_rlt,keys_ignore_list=['avg_est_err'])
                model.display_log_figure()

            # validation
            if (not_within_batch or i==0) and (model.gradient_step_num) % opt['train']['val_freq'] == 0: # and model.gradient_step_num>=opt['train']['D_init_iters']:
                print_rlt = OrderedDict()
                if model.generator_changed:
                    print('---------- validation -------------')
                    start_time = time.time()
                    if False and SAVE_IMAGE_COLLAGE and model.gradient_step_num%opt['train']['val_save_freq'] == 0: #Saving training images:
                        # GT_image_collage,quantized_image_collage = [],[]
                        cur_train_results = model.get_current_visuals(entire_batch=True)
                        train_psnrs = [util.calculate_psnr(util.tensor2img(cur_train_results['Decomp'][im_num], out_type=np.uint8,min_max=[0,255]),
                            util.tensor2img(cur_train_results['Uncomp'][im_num], out_type=np.uint8,min_max=[0,255])) for im_num in range(len(cur_train_results['Decomp']))]
                        #Save latest training batch output:
                        save_img_path = os.path.join(os.path.join(opt['path']['val_images']),
                                                     '{:d}_Tr_PSNR{:.3f}.png'.format(model.gradient_step_num, np.mean(train_psnrs)))
                        util.save_img(np.clip(np.concatenate((np.concatenate([util.tensor2img(cur_train_results['Uncomp'][im_num], out_type=np.uint8,min_max=[0,255]) for im_num in
                                 range(len(cur_train_results['Decomp']))],0), np.concatenate(
                                [util.tensor2img(cur_train_results['Decomp'][im_num], out_type=np.uint8,min_max=[0,255]) for im_num in range(len(cur_train_results['Decomp']))],
                                0)), 1), 0, 255).astype(np.uint8), save_img_path)
                    Z_latent = [0]+([-0.5,0.5] if opt['network_G']['latent_input'] else [])
                    print_rlt['psnr'] = 0
                    for cur_Z in Z_latent:
                        model.perform_validation(data_loader=val_loader,cur_Z=cur_Z,print_rlt=print_rlt,GT_and_quantized=save_GT_Uncomp,
                                                 save_images=((model.gradient_step_num) % opt['train']['val_save_freq'] == 0) or save_GT_Uncomp)
                    if save_GT_Uncomp:  # Save GT Uncomp images
                        save_GT_Uncomp = False
                    print_rlt['psnr'] /= len(Z_latent)
                    model.log_dict['psnr_val'].append((model.gradient_step_num,print_rlt['psnr']))
                else:
                    print('Skipping validation because generator is unchanged')
                # time_elapsed = time.time() - start_time
                # Save to log
                print_rlt['model'] = opt['model']
                print_rlt['epoch'] = epoch
                print_rlt['iters'] = model.gradient_step_num
                # print_rlt['time'] = time_elapsed
                print_rlt['time'] = (time.time() - start_time)/np.maximum(1,model.gradient_step_num-start_time_gradient_step)
                # model.display_log_figure()
                # model.generator_changed = False
                logger.print_format_results('val', print_rlt,keys_ignore_list=['avg_est_err'])
                print('-----------------------------------')

            model.feed_data(train_data,mixed_Y=True)
            model.optimize_parameters()


            # update learning rate
            if not_within_batch:
                lr_too_low = model.update_learning_rate(model.gradient_step_num)
            # current_step += 1
        if lr_too_low:
            print('Stopping training because LR is too low')
            break

    print('Saving the final model.')
    model.save(model.gradient_step_num)
    model.save_log()
    print('End of training.')
示例#9
0
def main():
    """Parse options, build dataloaders, and run the training loop.

    Command line:
        -opt         Path to the option JSON file (required).
        -single_GPU  Restrict training to a single GPU.

    The loop supports gradient accumulation (``max_accumulation_steps``
    raw steps per optimizer step), time-based checkpointing (keeping only
    the three most recent checkpoints on disk), periodic logging and
    validation, and stops early once the learning rate drops too low.
    """
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to option JSON file.')
    parser.add_argument('-single_GPU',
                        action='store_true',
                        help='Utilize only one GPU')
    # Parse the command line once (the original re-parsed it per access).
    args = parser.parse_args()
    if args.single_GPU:
        available_GPUs = util.Assign_GPU()
    else:
        available_GPUs = util.Assign_GPU(max_GPUs=None)
    opt = option.parse(args.opt,
                       is_train=True,
                       batch_size_multiplier=len(available_GPUs))

    if not opt['train']['resume']:
        util.mkdir_and_rename(
            opt['path']
            ['experiments_root'])  # Modify experiment name if exists
        util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root' and \
            not key == 'pretrained_model_G' and not key == 'pretrained_model_D'))
    option.save(opt)
    opt = option.dict_to_nonedict(
        opt)  # Convert to NoneDict, which return None for missing key.
    # print to file and std_out simultaneously
    sys.stdout = PrintLogger(opt['path']['log'])
    # random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    print("Random Seed: ", seed)
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloader
    # Initialize so a missing 'train' phase trips the assert below instead
    # of raising a NameError.
    train_loader = None
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            max_accumulation_steps = max([
                opt['train']['grad_accumulation_steps_G'],
                opt['train']['grad_accumulation_steps_D']
            ])
            train_set = create_dataset(dataset_opt)
            train_size = int(
                math.ceil(len(train_set) / dataset_opt['batch_size']))
            print('Number of train images: {:,d}, iters: {:,d}'.format(
                len(train_set), train_size))
            total_iters = int(opt['train']['niter'] *
                              max_accumulation_steps)  #-current_step
            total_epoches = int(math.ceil(total_iters / train_size))
            print('Total epoches needed: {:d} for iters {:,d}'.format(
                total_epoches, total_iters))
            train_loader = create_dataloader(train_set, dataset_opt)
        elif phase == 'val':
            val_dataset_opt = dataset_opt
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('Number of val images in [{:s}]: {:d}'.format(
                dataset_opt['name'], len(val_set)))
        else:
            raise NotImplementedError(
                'Phase [{:s}] is not recognized.'.format(phase))
    assert train_loader is not None
    # Create model
    if max_accumulation_steps != 1:
        model = create_model(opt, max_accumulation_steps)
    else:
        model = create_model(opt)
    # create logger
    logger = Logger(opt)
    # Save validation set results as image collage:
    SAVE_IMAGE_COLLAGE = True
    per_image_saved_patch = min(
        [min(im['HR'].shape[1:]) for im in val_loader.dataset]) - 2
    num_val_images = len(val_loader.dataset)
    # Use the largest row count that divides the number of val images evenly.
    val_images_collage_rows = int(np.floor(np.sqrt(num_val_images)))
    while val_images_collage_rows > 1:
        if np.round(num_val_images / val_images_collage_rows
                    ) == num_val_images / val_images_collage_rows:
            break
        val_images_collage_rows -= 1
    start_time = time.time()
    save_GT_HR = True
    lr_too_low = False
    print('---------- Start training -------------')
    last_saving_time = time.time()
    recently_saved_models = deque(maxlen=4)
    # Initialize up front so the final save below cannot raise a NameError
    # when the training loop body never executes (e.g. resuming a run that
    # already reached total_epoches).
    gradient_step_num = model.step // max_accumulation_steps
    for epoch in range(int(math.floor(model.step / train_size)),
                       total_epoches):
        for i, train_data in enumerate(train_loader):
            gradient_step_num = model.step // max_accumulation_steps
            # True on the last accumulation step of a batch, i.e. when the
            # optimizer actually steps.
            not_within_batch = model.step % max_accumulation_steps == (
                max_accumulation_steps - 1)
            saving_step = (
                (time.time() - last_saving_time) > 60 *
                opt['logger']['save_checkpoint_freq']) and not_within_batch
            if saving_step:
                last_saving_time = time.time()

            # save models
            if lr_too_low or saving_step:
                recently_saved_models.append(model.save(gradient_step_num))
                model.save_log()
                # Keep only the 3 most recent checkpoints on disk.
                if len(recently_saved_models) > 3:
                    model_2_delete = recently_saved_models.popleft()
                    os.remove(model_2_delete)
                    if model.D_exists:
                        os.remove(model_2_delete.replace('_G.', '_D.'))
                print('{}: Saving the model before iter {:d}.'.format(
                    datetime.now().strftime('%H:%M:%S'), gradient_step_num))
                if lr_too_low:
                    break

            if model.step > total_iters:
                break

            # training
            model.feed_data(train_data)
            model.optimize_parameters()
            if not model.D_exists:  #Avoid using the naive MultiLR scheduler when using adversarial loss
                # NOTE(review): this steps the schedulers with the model's own
                # gradient_step_num attribute, not the local variable computed
                # above — confirm the model keeps that attribute up to date.
                for scheduler in model.schedulers:
                    scheduler.step(model.gradient_step_num)
            time_elapsed = time.time() - start_time
            if not_within_batch: start_time = time.time()

            # log
            if gradient_step_num % opt['logger'][
                    'print_freq'] == 0 and not_within_batch:
                logs = model.get_current_log()
                print_rlt = OrderedDict()
                print_rlt['model'] = opt['model']
                print_rlt['epoch'] = epoch
                print_rlt['iters'] = gradient_step_num
                print_rlt['time'] = time_elapsed
                for k, v in logs.items():
                    print_rlt[k] = v
                print_rlt['lr'] = model.get_current_learning_rate()
                logger.print_format_results('train',
                                            print_rlt,
                                            keys_ignore_list=IGNORED_KEYS_LIST)
                model.display_log_figure()

            # validation
            if not_within_batch and (gradient_step_num) % opt['train'][
                    'val_freq'] == 0:  # and gradient_step_num>=opt['train']['D_init_iters']:
                print_rlt = OrderedDict()
                if model.generator_changed:
                    print('---------- validation -------------')
                    start_time = time.time()
                    if False and SAVE_IMAGE_COLLAGE and model.gradient_step_num % opt[
                            'train'][
                                'val_save_freq'] == 0:  #Saving training images:
                        GT_image_collage = []
                        cur_train_results = model.get_current_visuals(
                            entire_batch=True)
                        train_psnrs = [
                            util.calculate_psnr(
                                util.tensor2img(
                                    cur_train_results['SR'][im_num],
                                    out_type=np.float32) * 255,
                                util.tensor2img(
                                    cur_train_results['HR'][im_num],
                                    out_type=np.float32) * 255)
                            for im_num in range(len(cur_train_results['SR']))
                        ]
                        #Save latest training batch output:
                        save_img_path = os.path.join(
                            os.path.join(opt['path']['val_images']),
                            '{:d}_Tr_PSNR{:.3f}.png'.format(
                                gradient_step_num, np.mean(train_psnrs)))
                        util.save_img(
                            np.clip(
                                np.concatenate(
                                    (np.concatenate([
                                        util.tensor2img(
                                            cur_train_results['HR'][im_num],
                                            out_type=np.float32) * 255
                                        for im_num in range(
                                            len(cur_train_results['SR']))
                                    ], 0),
                                     np.concatenate([
                                         util.tensor2img(
                                             cur_train_results['SR'][im_num],
                                             out_type=np.float32) * 255
                                         for im_num in range(
                                             len(cur_train_results['SR']))
                                     ], 0)), 1), 0, 255).astype(np.uint8),
                            save_img_path)
                    # Validate at Z=0 and, when a latent input exists, at
                    # Z=-1 and Z=1 as well; PSNR is averaged over all Z.
                    Z_latent = [0] + ([-1, 1] if
                                      opt['network_G']['latent_input'] else [])
                    print_rlt['psnr'] = 0
                    for cur_Z in Z_latent:
                        sr_images = model.perform_validation(
                            data_loader=val_loader,
                            cur_Z=cur_Z,
                            print_rlt=print_rlt,
                            save_GT_HR=save_GT_HR,
                            save_images=((model.gradient_step_num) %
                                         opt['train']['val_save_freq'] == 0)
                            or save_GT_HR)
                        if logger.use_tb_logger:
                            logger.tb_logger.log_images(
                                'validation_Z%.2f' % (cur_Z),
                                [im[:, :, [2, 1, 0]] for im in sr_images],
                                model.gradient_step_num)

                        if save_GT_HR:  # Save GT Uncomp images
                            save_GT_HR = False
                    model.log_dict['psnr_val'].append(
                        (gradient_step_num, print_rlt['psnr'] / len(Z_latent)))
                else:
                    print('Skipping validation because generator is unchanged')
                time_elapsed = time.time() - start_time
                # Save to log
                print_rlt['model'] = opt['model']
                print_rlt['epoch'] = epoch
                print_rlt['iters'] = gradient_step_num
                print_rlt['time'] = time_elapsed
                model.display_log_figure()
                logger.print_format_results('val',
                                            print_rlt,
                                            keys_ignore_list=IGNORED_KEYS_LIST)
                print('-----------------------------------')

            # update learning rate
            if not_within_batch:
                lr_too_low = model.update_learning_rate(gradient_step_num)
        if lr_too_low:
            print('Stopping training because LR is too low')
            break

    print('Saving the final model.')
    model.save(gradient_step_num)
    print('End of training.')
示例#10
0
def main():
    """Parse options, build dataloaders, and train the RCGAN solver.

    Trains for ``NUM_EPOCH`` epochs, validates every ``solver.val_step``
    epochs (saving visual grids and the best checkpoint by validation
    loss), and finally dumps per-epoch loss curves to
    ``train_results.csv``.
    """
    parser = argparse.ArgumentParser(
        description='Train Super Resolution Models')
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)

    if opt['train']['resume'] is False:
        util.mkdir_and_rename(
            opt['path']['exp_root'])  # rename old experiments if exists
        util.mkdirs((path for key, path in opt['path'].items() if not key == 'exp_root' and \
                     not key == 'pretrain_G' and not key == 'pretrain_D'))
        option.save(opt)
        opt = option.dict_to_nonedict(
            opt)  # Convert to NoneDict, which return None for missing key.
    else:
        opt = option.dict_to_nonedict(opt)
        if opt['train']['resume_path'] is None:
            # Fixed broken error-message wording ("does not declarate").
            raise ValueError("The 'resume_path' is not declared")

    NUM_EPOCH = int(opt['train']['num_epochs'])

    # random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    print("Random Seed: ", seed)
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloader
    # Initialize so missing phases raise the explicit ValueError below
    # instead of a NameError.
    train_loader = None
    val_loader = None
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_loader = create_dataloader(train_set, dataset_opt)
            print('Number of train images in [%s]: %d' %
                  (dataset_opt['name'], len(train_set)))
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('Number of val images in [%s]: %d' %
                  (dataset_opt['name'], len(val_set)))
        elif phase == 'test':
            pass
        else:
            raise NotImplementedError("Phase [%s] is not recognized." % phase)

    if train_loader is None:
        raise ValueError("The training data does not exist")

    solver = RCGANModel(opt)

    solver.summary(train_set[0]['CAT'].size(), train_set[0]['IR'].size())
    solver.net_init()
    print('[Start Training]')

    start_epoch = 1
    if opt['train']['resume']:
        start_epoch = solver.load()

    for epoch in range(start_epoch, NUM_EPOCH + 1):
        # Per-epoch loss accumulators (weighted by batch size).
        train_loss_g1 = 0.0
        train_loss_g2 = 0.0
        train_loss_d1 = 0.0
        train_loss_d2 = 0.0

        train_bar = tqdm(train_loader)
        # Train model
        for iter, batch in enumerate(train_bar):
            solver.feed_data(batch)
            loss_g_total, loss_d_total = solver.train_step()
            cur_batch_size = batch['CAT'].size(0)
            train_loss_g1 += loss_g_total[0] * cur_batch_size
            train_loss_g2 += loss_g_total[1] * cur_batch_size
            train_loss_d1 += loss_d_total[0] * cur_batch_size
            train_loss_d2 += loss_d_total[1] * cur_batch_size
            train_bar.set_description(
                desc='[%d/%d] G-Loss: %.4f D-Loss: %.4f' %
                (epoch, NUM_EPOCH, loss_g_total[0] + loss_g_total[1],
                 loss_d_total[0] + loss_d_total[1]))

        solver.results['train_G_loss1'].append(train_loss_g1 / len(train_set))
        solver.results['train_G_loss2'].append(train_loss_g2 / len(train_set))
        solver.results['train_D_loss1'].append(train_loss_d1 / len(train_set))
        solver.results['train_D_loss2'].append(train_loss_d2 / len(train_set))
        print('Train G-Loss: %.4f' %
              ((train_loss_g1 + train_loss_g2) / len(train_set)))
        print('Train D-Loss: %.4f' %
              ((train_loss_d1 + train_loss_d2) / len(train_set)))

        train_bar.close()

        if epoch % solver.val_step == 0 and epoch != 0:
            print('[Validating...]')
            vis_index = 1
            val_loss = 0.0
            for iter, batch in enumerate(val_loader):
                solver.feed_data(batch)
                loss_total = solver.test()
                batch_size = batch['VIS'].size(0)
                # Save the current visuals as one image grid per batch.
                vis_list = solver.get_current_visual_list()
                images = torch.stack(vis_list)
                saveimg = thutil.make_grid(images, nrow=3, padding=5)
                saveimg_nd = saveimg.byte().permute(1, 2, 0).numpy()
                misc.imsave(
                    os.path.join(solver.vis_dir,
                                 'epoch_%d_%d.png' % (epoch, vis_index)),
                    saveimg_nd)
                vis_index += 1
                val_loss += loss_total * batch_size

            solver.results['val_G_loss'].append(val_loss / len(val_set))
            print('Valid Loss: %.4f' % (val_loss / len(val_set)))
            # statistics: lower validation loss is better here.
            is_best = False
            if solver.best_prec > solver.results['val_G_loss'][-1]:
                solver.best_prec = solver.results['val_G_loss'][-1]
                is_best = True

            solver.save(epoch, is_best)

    # NOTE(review): 'val_G_loss' is only appended on validation epochs, so
    # its length differs from NUM_EPOCH when val_step > 1 and the DataFrame
    # constructor would fail on mismatched column lengths — confirm
    # val_step == 1 (or pad the column) before relying on this export.
    data_frame = pd.DataFrame(data={
        'train_G_loss1': solver.results['train_G_loss1'],
        'train_G_loss2': solver.results['train_G_loss2'],
        'train_D_loss1': solver.results['train_D_loss1'],
        'train_D_loss2': solver.results['train_D_loss2'],
        'val_G_loss': solver.results['val_G_loss']
    },
                              index=range(1, NUM_EPOCH + 1))

    data_frame.to_csv(os.path.join(solver.results_dir, 'train_results.csv'),
                      index_label='Epoch')
示例#11
0
def main():
    """Parse options, build dataloaders, and train a super-resolution solver.

    The solver class is selected by ``opt['mode']``; training runs for
    ``NUM_EPOCH`` epochs with periodic validation (PSNR/SSIM computed on
    the Y channel of border-cropped images) and a final CSV export of the
    collected statistics.
    """
    # os.environ['CUDA_VISIBLE_DEVICES']="1" # You can specify your GPU device here. I failed to perform it by `torch.cuda.set_device()`.
    parser = argparse.ArgumentParser(
        description='Train Super Resolution Models')
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)

    if opt['train']['resume'] is False:
        util.mkdir_and_rename(
            opt['path']['exp_root'])  # rename old experiments if exists
        util.mkdirs((path for key, path in opt['path'].items() if not key == 'exp_root' and \
                     not key == 'pretrain_G' and not key == 'pretrain_D'))
        option.save(opt)
        opt = option.dict_to_nonedict(
            opt)  # Convert to NoneDict, which return None for missing key.
    else:
        opt = option.dict_to_nonedict(opt)
        if opt['train']['resume_path'] is None:
            # Fixed broken error-message wording ("does not declarate").
            raise ValueError("The 'resume_path' is not declared")

    if opt['exec_debug']:
        # Debug runs use a short schedule and the small debug datasets.
        NUM_EPOCH = 100
        opt['datasets']['train']['dataroot_HR'] = opt['datasets']['train'][
            'dataroot_HR_debug']  #"./dataset/TrainData/DIV2K_train_HR_sub",
        opt['datasets']['train']['dataroot_LR'] = opt['datasets']['train'][
            'dataroot_LR_debug']  #./dataset/TrainData/DIV2K_train_HR_sub_LRx3"

    else:
        NUM_EPOCH = int(opt['train']['num_epochs'])

    # random seed
    seed = opt['train']['manual_seed']  #0
    if seed is None:
        seed = random.randint(1, 10000)
    print("Random Seed: ", seed)
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloader
    # Initialize so a missing 'train' phase raises the explicit ValueError
    # below instead of a NameError.
    train_loader = None
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_loader = create_dataloader(train_set, dataset_opt)
            print('Number of train images in [%s]: %d' %
                  (dataset_opt['name'], len(train_set)))
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('Number of val images in [%s]: %d' %
                  (dataset_opt['name'], len(val_set)))
        elif phase == 'test':
            pass
        else:
            raise NotImplementedError("Phase [%s] is not recognized." % phase)

    if train_loader is None:
        raise ValueError("The training data does not exist")

    # TODO: design an exp that can obtain the location of the biggest error
    # 'sr', 'fi' and 'msan' all map to the same solver class.
    if opt['mode'] in ('sr', 'fi', 'msan'):
        solver = SRModel1(opt)
    elif opt['mode'] == 'srgan':
        solver = SRModelGAN(opt)
    elif opt['mode'] == 'sr_curriculum':
        solver = SRModelCurriculum(opt)
    else:
        # Previously an unknown mode crashed later with a NameError on
        # 'solver'; fail fast with a clear message instead.
        raise NotImplementedError("Mode [%s] is not recognized." %
                                  opt['mode'])

    solver.summary(train_set[0]['LR'].size())
    solver.net_init()
    print('[Start Training]')

    start_time = time.time()

    start_epoch = 1
    if opt['train']['resume']:
        start_epoch = solver.load()

    for epoch in range(start_epoch, NUM_EPOCH + 1):
        # Initialization
        solver.training_loss = 0.0
        epoch_loss_log = 0.0

        if opt['mode'] in ('sr', 'srgan', 'sr_curriculum', 'fi', 'msan'):
            training_results = {'batch_size': 0, 'training_loss': 0.0}
        else:
            pass  # TODO
        train_bar = tqdm(train_loader)

        # Train model
        for iter, batch in enumerate(train_bar):
            solver.feed_data(batch)
            iter_loss = solver.train_step()
            epoch_loss_log += iter_loss.item()
            batch_size = batch['LR'].size(0)
            training_results['batch_size'] += batch_size

            # All modes accumulate the same batch-weighted loss; only
            # 'sr_curriculum' accumulates the tensor's .data. The original
            # spelled these out as five identical elif branches.
            if opt['mode'] in ('sr', 'srgan', 'fi', 'msan'):
                training_results['training_loss'] += iter_loss * batch_size
                train_bar.set_description(desc='[%d/%d] Loss: %.4f ' %
                                          (epoch, NUM_EPOCH, iter_loss))
            elif opt['mode'] == 'sr_curriculum':
                training_results[
                    'training_loss'] += iter_loss.data * batch_size
                train_bar.set_description(desc='[%d/%d] Loss: %.4f ' %
                                          (epoch, NUM_EPOCH, iter_loss))
            else:
                pass  # TODO

        solver.last_epoch_loss = epoch_loss_log / (len(train_bar))

        train_bar.close()
        time_elapse = time.time() - start_time
        start_time = time.time()
        print('Train Loss: %.4f' % (training_results['training_loss'] /
                                    training_results['batch_size']))

        # validate
        val_results = {
            'batch_size': 0,
            'val_loss': 0.0,
            'psnr': 0.0,
            'ssim': 0.0
        }

        if epoch % solver.val_step == 0 and epoch != 0:
            print('[Validating...]')
            start_time = time.time()
            solver.val_loss = 0.0

            for iter, batch in enumerate(val_loader):
                solver.feed_data(batch)
                iter_loss = solver.test(opt['chop'])
                batch_size = batch['LR'].size(0)
                val_results['batch_size'] += batch_size

                visuals = solver.get_current_visual()  # float cpu tensor

                sr_img = np.transpose(
                    util.quantize(visuals['SR'], opt['rgb_range']).numpy(),
                    (1, 2, 0)).astype(np.uint8)
                gt_img = np.transpose(
                    util.quantize(visuals['HR'], opt['rgb_range']).numpy(),
                    (1, 2, 0)).astype(np.uint8)

                # calculate PSNR/SSIM on the Y channel of images whose
                # border ('scale' pixels wide) has been cropped away.
                crop_size = opt['scale']
                cropped_sr_img = sr_img[crop_size:-crop_size,
                                        crop_size:-crop_size, :]
                cropped_gt_img = gt_img[crop_size:-crop_size,
                                        crop_size:-crop_size, :]

                cropped_sr_img = cropped_sr_img / 255.
                cropped_gt_img = cropped_gt_img / 255.
                cropped_sr_img = rgb2ycbcr(cropped_sr_img).astype(np.float32)
                cropped_gt_img = rgb2ycbcr(cropped_gt_img).astype(np.float32)

                val_results['val_loss'] += iter_loss * batch_size

                val_results['psnr'] += util.calc_psnr(cropped_sr_img * 255,
                                                      cropped_gt_img * 255)
                val_results['ssim'] += util.compute_ssim1(
                    cropped_sr_img * 255, cropped_gt_img * 255)

                if opt['mode'] == 'srgan':
                    pass  # TODO

            avg_psnr = val_results['psnr'] / val_results['batch_size']
            avg_ssim = val_results['ssim'] / val_results['batch_size']
            print(
                'Valid Loss: %.4f | Avg. PSNR: %.4f | Avg. SSIM: %.4f | Learning Rate: %f'
                % (val_results['val_loss'] / val_results['batch_size'],
                   avg_psnr, avg_ssim, solver.current_learning_rate()))

            # Fixed operand order: the original computed a negative
            # elapsed time (start_time - time.time()).
            time_elapse = time.time() - start_time

            # tensorboard visualization
            solver.training_loss = training_results[
                'training_loss'] / training_results['batch_size']
            solver.val_loss = val_results['val_loss'] / val_results[
                'batch_size']

            solver.tf_log(epoch)

            # statistics
            if opt['mode'] in ('sr', 'srgan', 'sr_curriculum', 'fi', 'msan'):
                solver.results['training_loss'].append(
                    solver.training_loss.cpu().data.item())
                solver.results['val_loss'].append(
                    solver.val_loss.cpu().data.item())
                solver.results['psnr'].append(avg_psnr)
                solver.results['ssim'].append(avg_ssim)
            else:
                pass  # TODO

            # Track the best PSNR so far and flag this epoch's checkpoint.
            is_best = False
            if solver.best_prec < solver.results['psnr'][-1]:
                solver.best_prec = solver.results['psnr'][-1]
                is_best = True

            print(
                '#############################################################'
            )
            print(solver.best_prec)
            print(solver.results['psnr'][-1])
            print(
                '***************************************************************'
            )
            solver.save(epoch, is_best)

        # update lr
        solver.update_learning_rate(epoch)

    # NOTE(review): the results lists only grow on validation epochs, so
    # their length differs from NUM_EPOCH when val_step > 1 and the
    # DataFrame constructor would fail on mismatched column lengths —
    # confirm val_step == 1 (or pad the columns) before relying on this.
    data_frame = pd.DataFrame(data={
        'training_loss': solver.results['training_loss'],
        'val_loss': solver.results['val_loss'],
        'psnr': solver.results['psnr'],
        'ssim': solver.results['ssim']
    },
                              index=range(1, NUM_EPOCH + 1))
    data_frame.to_csv(os.path.join(solver.results_dir, 'train_results.csv'),
                      index_label='Epoch')
示例#12
0
def main():
    """Train a super-resolution model from a JSON options file.

    Workflow: parse options, set up experiment directories (or validate
    resume settings), seed RNGs, build train/val dataloaders, create the
    ``SRModel`` solver, optionally restore a checkpoint, run the epoch loop
    with periodic validation/checkpointing, and finally dump the loss
    history to ``train_results.csv``.
    """
    # get options
    parser = argparse.ArgumentParser(description='Train Super Resolution Models')
    parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=True)

    if opt['train']['resume'] is False:
        # Fresh run: archive any previous experiment directory, create the
        # output tree (pretrained-model entries are file paths, not dirs),
        # and persist the resolved options.
        util.mkdir_and_rename(opt['path']['exp_root'])  # rename old experiments if exists
        util.mkdirs((path for key, path in opt['path'].items() if not key == 'exp_root' and \
                     not key == 'pretrain_G' and not key == 'pretrain_D'))
        option.save(opt)
        opt = option.dict_to_nonedict(opt)  # Convert to NoneDict, which return None for missing key.
    else:
        opt = option.dict_to_nonedict(opt)
        if opt['train']['resume_path'] is None:
            raise ValueError("The 'resume_path' does not declarate")

    if opt['exec_debug']:
        # Debug mode: short fixed run on a small hard-coded dataset.
        NUM_EPOCH = 50
        opt['datasets']['train']['dataroot_HR'] = '/home/ser606/ZhenLi/data/DIV2K/DIV2K_train_HR_debug'
        opt['datasets']['train']['dataroot_LR'] = '/home/ser606/ZhenLi/data/DIV2K/DIV2K_train_LR_debug'
    else:
        NUM_EPOCH = int(opt['train']['num_epochs'])

    # random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    print("Random Seed: ", seed)
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloader
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_loader = create_dataloader(train_set, dataset_opt)
            print('Number of train images in [%s]: %d' % (dataset_opt['name'], len(train_set)))
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('Number of val images in [%s]: %d' % (dataset_opt['name'], len(val_set)))
        else:
            raise NotImplementedError("Phase [%s] is not recognized." % phase)

    if train_loader is None:
        raise ValueError("The training data does not exist")

    # Propagate the number of degradation coefficients (when the dataset
    # provides them) into the generator network and training options.
    if 'coeff' in train_set[0]:
        opt['networks']['G']['num_coeff'] = len(train_set[0]['coeff'])
        opt['train']['num_coeff'] = len(train_set[0]['coeff'])
    # TODO: design an exp that can obtain the location of the biggest error
    solver = SRModel(opt)
    solver.summary(train_set[0]['LR'].size())

    print('[Start Training]')

    start_time = time.time()

    # resume from the latest epoch
    start_epoch = 1
    if opt['train']['resume']:
        checkpoint_path = os.path.join(solver.checkpoint_dir, 'checkpoint.pth')
        print('[Loading checkpoint from %s...]' % checkpoint_path)
        checkpoint = torch.load(checkpoint_path)
        # NOTE(review): checkpoint['state_dict'] seems to hold a module
        # (hence the extra .state_dict() call) — confirm the save format.
        solver.model.load_state_dict(checkpoint['state_dict'].state_dict())
        start_epoch = checkpoint['epoch'] + 1  # Because the last state had been saved
        solver.optimizer.load_state_dict(checkpoint['optimizer'])
        solver.best_prec = checkpoint['best_prec']
        solver.best_epoch = checkpoint['best_epoch']
        solver.results = checkpoint['results']
        print('=> Done.')
    else:
        solver.net_init()


    # best_epoch = 0
    # start train
    for epoch in range(start_epoch, NUM_EPOCH + 1):
        # Initialization
        solver.training_loss = 0.0
        if opt['mode'] == 'sr':
            training_results = {'batch_size': 0, 'training_loss': 0.0}
        else:
            pass  # TODO
        train_bar = tqdm(train_loader)

        # Train model
        for iter, batch in enumerate(train_bar):
            solver.feed_data(batch)
            iter_loss = solver.train_step()
            batch_size = batch['LR'].size(0)
            training_results['batch_size'] += batch_size

            if opt['mode'] == 'sr':
                # Weight the per-iteration loss by batch size so the final
                # division yields a per-sample average.
                training_results['training_loss'] += iter_loss * batch_size
                train_bar.set_description(desc='[%d/%d] Train | Loss: %.4f ' % (
                    epoch, NUM_EPOCH, iter_loss))
            else:
                pass  # TODO

        train_bar.close()
        time_elapse = time.time() - start_time
        start_time = time.time()
        # validate
        val_results = {'batch_size': 0, 'val_loss': 0.0, 'psnr': 0.0, 'ssim': 0.0}

        if epoch % solver.val_step == 0 and epoch != 0:
            start_time = time.time()
            solver.val_loss = 0.0

            val_bar = tqdm(val_loader)

            for iter, batch in enumerate(val_bar):
                solver.feed_data(batch)
                iter_loss = solver.test()
                batch_size = batch['LR'].size(0)
                val_results['batch_size'] += batch_size

                if opt['mode'] == 'sr':
                    val_results['val_loss'] += iter_loss * batch_size
                    val_bar.set_description(desc='[%d/%d] Valid | Loss: %.4f ' % (epoch, NUM_EPOCH, iter_loss))
                if opt['mode'] == 'srgan':
                    pass  # TODO

            time_elapse = time.time() - start_time

            # if epoch%solver.log_step == 0 and epoch != 0:
            # tensorboard visualization
            solver.training_loss = training_results['training_loss'] / training_results['batch_size']
            solver.val_loss = val_results['val_loss'] / val_results['batch_size']

            print('\n Train Loss: %.8f | Valid Loss: %.8f | Learning Rate: %f' % (
            solver.training_loss, solver.val_loss, solver.current_learning_rate()))

            # TODO: I haven't installed tensorflow, because I should install cuda 9.0 first
            # solver.tf_log(epoch)

            # statistics
            if opt['mode'] == 'sr':
                # assumes the accumulated losses are torch tensors — TODO confirm
                solver.results['training_loss'].append(float(solver.training_loss.data.cpu().numpy()))
                solver.results['val_loss'].append(float(solver.val_loss.data.cpu().numpy()))
            else:
                pass  # TODO

            # Track the epoch with the lowest validation loss (best_prec
            # here is a minimum, despite the name).
            is_best = False
            if solver.best_prec > solver.results['val_loss'][-1]:
                solver.best_prec = solver.results['val_loss'][-1]
                is_best = True
                solver.best_epoch = epoch

            print('The lowest validation error is in %d' % solver.best_epoch)
            solver.save(epoch, is_best)

        # update lr
        solver.update_learning_rate(epoch)

    # NOTE(review): the index assumes one result entry per epoch; with
    # val_step > 1 or a resumed run the list lengths may not match the
    # range and pandas would raise — verify.
    data_frame = pd.DataFrame(
        data={'training_loss': solver.results['training_loss']
            , 'val_loss': solver.results['val_loss']
              },
        index=range(1, NUM_EPOCH + 1)
    )
    data_frame.to_csv(os.path.join(solver.results_dir, 'train_results.csv'),
                      index_label='Epoch')
示例#13
0
def main():
    """Train a noise-level (sigma) estimation model.

    Parses the ``-opt`` JSON file, prepares experiment directories and
    dataloaders, then trains with randomly sampled noise levels. During
    validation, signal-dependent noise of a known sigma (parsed from the
    validation-set name) is added and the squared error of the predicted
    sigma is logged.
    """
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=True)

    util.mkdir_and_rename(
        opt['path']['experiments_root'])  # rename old experiments if exists
    util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root' and \
        not key == 'pretrain_model_G' and not key == 'pretrain_model_D'))
    option.save(opt)
    opt = option.dict_to_nonedict(
        opt)  # Convert to NoneDict, which return None for missing key.

    # print to file and std_out simultaneously
    sys.stdout = PrintLogger(opt['path']['log'])

    # random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    print("Random Seed: ", seed)
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloader
    val_loaders = []
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_size = int(
                math.ceil(len(train_set) / dataset_opt['batch_size']))
            print('Number of train images: {:,d}, iters: {:,d}'.format(
                len(train_set), train_size))
            total_iters = int(opt['train']['niter'])
            total_epoches = int(math.ceil(total_iters / train_size))
            print('Total epoches needed: {:d} for iters {:,d}'.format(
                total_epoches, total_iters))
            train_loader = create_dataloader(train_set, dataset_opt)
        elif 'val' in phase:
            # Multiple validation sets are supported ('val', 'val_...').
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('Number of val images in [%s]: %d' %
                  (dataset_opt['name'], len(val_set)))
            val_loaders.append(val_loader)
        else:
            raise NotImplementedError("Phase [%s] is not recognized." % phase)
    assert train_loader is not None

    # Create model
    model = create_model(opt)
    # create logger
    logger = Logger(opt)

    current_step = 0
    start_time = time.time()
    print('---------- Start training -------------')
    for epoch in range(total_epoches):
        for i, train_data in enumerate(train_loader):
            current_step += 1
            if current_step > total_iters:
                break

            # Sample a random noise level for this batch; the commented
            # lines preserve earlier variants of the noise injection.
            # level = random.randint(0, 80)
            level = random.uniform(0, 80)
            # train_data = add_Gaussian_noise(train_data, level)
            train_data = add_dependent_noise(train_data, level)
            model.feed_data(train_data)

            # training
            model.optimize_parameters(current_step)

            time_elapsed = time.time() - start_time
            start_time = time.time()

            # log
            if current_step % opt['logger']['print_freq'] == 0:
                logs = model.get_current_log()
                print_rlt = OrderedDict()
                print_rlt['model'] = opt['model']
                print_rlt['epoch'] = epoch
                print_rlt['iters'] = current_step
                print_rlt['time'] = time_elapsed
                for k, v in logs.items():
                    print_rlt[k] = v
                print_rlt['lr'] = model.get_current_learning_rate()
                logger.print_format_results('train', print_rlt)

            # save models
            if current_step % opt['logger']['save_checkpoint_freq'] == 0:
                print('Saving the model at the end of iter {:d}.'.format(
                    current_step))
                model.save(current_step)

            # validation
            if current_step % opt['train']['val_freq'] == 0:
                print('---------- validation -------------')
                for val_loader in val_loaders:
                    val_set_name = val_loader.dataset.opt['name']
                    print('validation [%s]...' % val_set_name)

                    print_rlt = OrderedDict()

                    start_time = time.time()
                    avg_l2 = 0.0
                    idx = 0

                    # Ground-truth sigma is encoded in the last two
                    # characters of the validation set name.
                    sigma_gt = int(val_set_name[-2:])

                    sigma_pre = []

                    for val_data in val_loader:
                        idx += 1
                        val_data = add_dependent_noise(val_data, sigma_gt)
                        model.feed_data(val_data, need_HR=False)
                        sigma = model.test_sigma().squeeze().float().cpu(
                        ).item()
                        sigma_pre.append(sigma)
                        avg_l2 += (sigma - sigma_gt)**2

                    print('sigma: {}'.format(sigma_pre))

                    # log the sigma, time for each quality
                    time_elapsed = time.time() - start_time

                    # NOTE(review): avg_l2 is the *sum* of squared errors
                    # (never divided by idx), so the logged value scales
                    # with the number of val images — confirm intended.
                    log_val_name = 'l2_noise{}'.format(sigma_gt)
                    print_rlt[log_val_name] = avg_l2

                    print_rlt['time'] = time_elapsed

                    # Save to log
                    print_rlt['model'] = opt['model']
                    print_rlt['epoch'] = epoch
                    print_rlt['iters'] = current_step
                    logger.print_format_results('val', print_rlt)
                print('-----------------------------------')
                # end of the validation

            # update learning rate
            model.update_learning_rate()

    print('Saving the final model.')
    model.save('latest')
    print('End of training.')
示例#14
0
def terp_survey():
    """Show the one-time OpenERP user survey dialog.

    Returns False immediately when the survey for SURVEY_VERSION has
    already been answered (tracked in ``options['survey.position']``).
    Otherwise runs the dialog until the user either submits valid data
    (uploaded via ``upload_data``) or cancels, then returns True.
    """
    if options['survey.position'] == SURVEY_VERSION:
        return False

    def color_set(widget, name):
        # Paint the widget with the named color from common.colors
        # (falling back to white) while keeping its text black.
        colour = widget.get_colormap().alloc_color(
            common.colors.get(name, 'white'))
        widget.modify_bg(gtk.STATE_ACTIVE, colour)
        widget.modify_fg(gtk.STATE_NORMAL, gtk.gdk.color_parse("black"))
        widget.modify_base(gtk.STATE_NORMAL, colour)
        widget.modify_text(gtk.STATE_NORMAL, gtk.gdk.color_parse("black"))
        widget.modify_text(gtk.STATE_INSENSITIVE, gtk.gdk.color_parse("black"))

    widnames = ('country', 'role', 'industry', 'employee', 'hear', 'system',
                'opensource')
    winglade = glade.XML(common.terp_path("dia_survey.glade"), "dia_survey",
                         gettext.textdomain())
    win = winglade.get_widget('dia_survey')
    parent = service.LocalService('gui.main').window
    win.set_transient_for(parent)
    win.set_icon(OPENERP_ICON)
    # Combo entries are pick-only; free text is not allowed.
    for widname in widnames:
        wid = winglade.get_widget('combo_' + widname)
        wid.child.set_editable(False)

    email_widget = winglade.get_widget('entry_email')
    want_ebook_widget = winglade.get_widget('check_button_ebook')

    def toggled_cb(togglebutton, *args):
        # The email field becomes required when the ebook is requested.
        value = togglebutton.get_active()
        color_set(email_widget, ('normal', 'required')[value])

    want_ebook_widget.connect('toggled', toggled_cb)

    while True:
        res = win.run()
        if res == gtk.RESPONSE_OK:
            email = email_widget.get_text()
            want_ebook = want_ebook_widget.get_active()
            if want_ebook and not len(email):
                # Ebook requested without an email: flag the field and
                # keep the dialog open for another attempt.
                color_set(email_widget, 'invalid')
            else:
                company = winglade.get_widget('entry_company').get_text()
                phone = winglade.get_widget('entry_phone').get_text()
                name = winglade.get_widget('entry_name').get_text()
                city = winglade.get_widget('entry_city').get_text()
                result = "\ncompany: " + str(company)
                result += "\nname: " + str(name)
                result += "\nphone: " + str(phone)
                result += "\ncity: " + str(city)
                for widname in widnames:
                    wid = winglade.get_widget('combo_' + widname)
                    result += "\n" + widname + ": " + wid.child.get_text()
                result += "\nplan_use: " + str(
                    winglade.get_widget('check_use').get_active())
                result += "\nplan_sell: " + str(
                    winglade.get_widget('check_sell').get_active())
                result += "\nwant_ebook: " + str(want_ebook)

                # 'comment_buffer' (renamed from 'buffer', which shadowed
                # the builtin) holds the free-form comment text.
                comment_buffer = winglade.get_widget(
                    'textview_comment').get_buffer()
                iter_start = comment_buffer.get_start_iter()
                iter_end = comment_buffer.get_end_iter()
                result += "\nnote: " + comment_buffer.get_text(
                    iter_start, iter_end, False)
                upload_data(email,
                            result,
                            type='SURVEY ' + str(SURVEY_VERSION))
                options['survey.position'] = SURVEY_VERSION
                options.save()
                parent.present()
                win.destroy()
                common.message(
                    _('Thank you for the feedback !\n\
Your comments have been sent to OpenERP.\n\
You should now start by creating a new database or\n\
connecting to an existing server through the "File" menu.'))
                break
        # BUG FIX: the original condition was
        # `res == gtk.RESPONSE_CANCEL or gtk.RESPONSE_DELETE_EVENT`,
        # which is always truthy because the second operand is evaluated
        # as a standalone (non-zero) constant. Use a membership test so
        # only cancel/delete responses take this branch.
        elif res in (gtk.RESPONSE_CANCEL, gtk.RESPONSE_DELETE_EVENT):
            parent.present()
            win.destroy()
            common.message(
                _('Thank you for testing OpenERP !\n\
You should now start by creating a new database or\n\
connecting to an existing server through the "File" menu.'))
            break

    return True
示例#15
0
def main():
    """Train a code-conditioned super-resolution model.

    The schedule is organised as "months" (outer passes over the data) and
    "days" (inner steps over slices of each month's batch): for every month
    batch a latent code is computed once (via DCI or a plain code lookup),
    then ``num_day`` training steps consume circular slices of that batch
    together with the matching code slice. Periodically logs, checkpoints,
    and validates (saving SR images and PSNR).
    """
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=True)

    util.mkdir_and_rename(
        opt['path']['experiments_root'])  # rename old experiments if exists
    util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root' and \
        not key == 'pretrain_model_G' and not key == 'pretrain_model_D'))
    option.save(opt)
    opt = option.dict_to_nonedict(
        opt)  # Convert to NoneDict, which return None for missing key.

    # print to file and std_out simultaneously
    sys.stdout = PrintLogger(opt['path']['log'])

    # random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    print("Random Seed: ", seed)
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloader
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_size = int(
                math.ceil(len(train_set) / dataset_opt['batch_size']))
            print('Number of train images: {:,d}, iters: {:,d}'.format(
                len(train_set), train_size))
            total_iters = int(opt['train']['niter'])
            total_epoches = int(math.ceil(total_iters / train_size))
            print('Total epoches needed: {:d} for iters {:,d}'.format(
                total_epoches, total_iters))
            train_loader = create_dataloader(train_set, dataset_opt)
            batch_size_per_month = dataset_opt['batch_size']
            batch_size_per_day = int(
                opt['datasets']['train']['batch_size_per_day'])
            num_month = int(opt['train']['num_month'])
            num_day = int(opt['train']['num_day'])
            # BUG FIX: the original read `use_dci = false ...` with a
            # lowercase `false`, a NameError whenever 'use_dci' is absent
            # from the options. Python's boolean constant is `False`.
            use_dci = False if 'use_dci' not in opt['train'] else opt['train'][
                'use_dci']
        elif phase == 'val':
            val_dataset_opt = dataset_opt
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('Number of val images in [{:s}]: {:d}'.format(
                dataset_opt['name'], len(val_set)))
        else:
            raise NotImplementedError(
                'Phase [{:s}] is not recognized.'.format(phase))
    assert train_loader is not None

    # Create model
    model = create_model(opt)
    # create logger
    logger = Logger(opt)

    current_step = 0
    start_time = time.time()
    print('---------- Start training -------------')
    for epoch in range(num_month):
        for i, train_data in enumerate(train_loader):
            # Compute the latent code once per month batch.
            if use_dci:
                cur_month_code = get_code_for_data(model, train_data, opt)
            else:
                cur_month_code = get_code(model, train_data, opt)
            for j in range(num_day):
                current_step += 1
                if current_step > total_iters:
                    break
                # Slice the month batch circularly: when the day window
                # runs off the end, wrap around to the front.
                cur_day_batch_start_idx = (
                    j * batch_size_per_day) % batch_size_per_month
                cur_day_batch_end_idx = cur_day_batch_start_idx + batch_size_per_day
                if cur_day_batch_end_idx > batch_size_per_month:
                    cur_day_batch_idx = np.hstack(
                        (np.arange(cur_day_batch_start_idx,
                                   batch_size_per_month),
                         np.arange(cur_day_batch_end_idx -
                                   batch_size_per_month)))
                else:
                    cur_day_batch_idx = slice(cur_day_batch_start_idx,
                                              cur_day_batch_end_idx)

                cur_day_train_data = {
                    'LR': train_data['LR'][cur_day_batch_idx],
                    'HR': train_data['HR'][cur_day_batch_idx]
                }
                code = cur_month_code[cur_day_batch_idx]

                # training
                model.feed_data(cur_day_train_data, code=code)
                model.optimize_parameters(current_step)

                time_elapsed = time.time() - start_time
                start_time = time.time()

                # log
                if current_step % opt['logger']['print_freq'] == 0:
                    logs = model.get_current_log()
                    print_rlt = OrderedDict()
                    print_rlt['model'] = opt['model']
                    print_rlt['epoch'] = epoch
                    print_rlt['iters'] = current_step
                    print_rlt['time'] = time_elapsed
                    for k, v in logs.items():
                        print_rlt[k] = v
                    print_rlt['lr'] = model.get_current_learning_rate()
                    logger.print_format_results('train', print_rlt)

                # save models
                if current_step % opt['logger']['save_checkpoint_freq'] == 0:
                    print('Saving the model at the end of iter {:d}.'.format(
                        current_step))
                    model.save(current_step)

                # validation
                if current_step % opt['train']['val_freq'] == 0:
                    print('---------- validation -------------')
                    start_time = time.time()

                    avg_psnr = 0.0
                    idx = 0
                    for val_data in val_loader:
                        idx += 1
                        img_name = os.path.splitext(
                            os.path.basename(val_data['LR_path'][0]))[0]
                        img_dir = os.path.join(opt['path']['val_images'],
                                               img_name)
                        util.mkdir(img_dir)

                        # Validation code: zeros, uniform, or (default)
                        # standard normal, per the training options.
                        if 'zero_code' in opt['train'] and opt['train'][
                                'zero_code']:
                            code_val = torch.zeros(
                                val_data['LR'].shape[0],
                                int(opt['network_G']['in_code_nc']),
                                val_data['LR'].shape[2],
                                val_data['LR'].shape[3])
                        elif 'rand_code' in opt['train'] and opt['train'][
                                'rand_code']:
                            code_val = torch.rand(
                                val_data['LR'].shape[0],
                                int(opt['network_G']['in_code_nc']),
                                val_data['LR'].shape[2],
                                val_data['LR'].shape[3])
                        else:
                            code_val = torch.randn(
                                val_data['LR'].shape[0],
                                int(opt['network_G']['in_code_nc']),
                                val_data['LR'].shape[2],
                                val_data['LR'].shape[3])

                        model.feed_data(val_data, code=code_val)
                        model.test()

                        visuals = model.get_current_visuals()
                        sr_img = util.tensor2img(visuals['SR'])  # uint8
                        gt_img = util.tensor2img(visuals['HR'])  # uint8

                        # Save SR images for reference
                        run_index = opt['name'].split("_")[2]
                        save_img_path = os.path.join(img_dir, 'srim_{:s}_{:s}_{:d}.png'.format( \
                            run_index, img_name, current_step))
                        util.save_img(sr_img, save_img_path)

                        # calculate PSNR on the border-cropped images
                        crop_size = opt['scale']
                        cropped_sr_img = sr_img[crop_size:-crop_size,
                                                crop_size:-crop_size, :]
                        cropped_gt_img = gt_img[crop_size:-crop_size,
                                                crop_size:-crop_size, :]
                        avg_psnr += util.psnr(cropped_sr_img, cropped_gt_img)

                    avg_psnr = avg_psnr / idx
                    time_elapsed = time.time() - start_time
                    # Save to log
                    print_rlt = OrderedDict()
                    print_rlt['model'] = opt['model']
                    print_rlt['epoch'] = epoch
                    print_rlt['iters'] = current_step
                    print_rlt['time'] = time_elapsed
                    print_rlt['psnr'] = avg_psnr
                    logger.print_format_results('val', print_rlt)
                    print('-----------------------------------')

                # update learning rate
                model.update_learning_rate()

    print('Saving the final model.')
    model.save('latest')
    print('End of training.')
def main():
    """Run a trained super-resolution model over benchmark test sets.

    Parses the ``-opt`` JSON file, builds one dataloader per test set,
    creates the solver (loading the model), then for each benchmark runs
    inference, optionally computes PSNR/SSIM when HR references exist,
    and saves SR images plus JSON/text result summaries.
    """
    parser = argparse.ArgumentParser(
        description='Test Super Resolution Models')
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)
    opt = option.dict_to_nonedict(opt)

    # initial configure
    scale = opt['scale']
    degrad = opt['degradation']
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()

    # create folders
    util.mkdir_and_rename(opt['path']['res_root'])
    option.save(opt)

    # create test dataloader
    bm_names = []
    test_loaders = []
    for ds_name, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)
        print('===> Test Dataset: [%s]   Number of images: [%d]' %
              (dataset_opt['name'], len(test_set)))
        bm_names.append(dataset_opt['name'])

    # create solver (and load model)
    solver = create_solver(opt)
    # Test phase
    print('===> Start Test')
    print("==================================================")
    print("Method: %s || Scale: %d || Degradation: %s" %
          (model_name, scale, degrad))

    for bm, test_loader in zip(bm_names, test_loaders):
        print("Test set : [%s]" % bm)

        sr_list = []
        path_list = []

        total_psnr = []
        total_ssim = []
        total_time = []
        res_dict = OrderedDict()

        # HR references are available only when the dataset class name
        # contains 'HR' (paired dataset).
        need_HR = False if test_loader.dataset.__class__.__name__.find(
            'HR') < 0 else True

        # NOTE(review): 'iter' shadows the builtin of the same name.
        for iter, batch in tqdm(enumerate(test_loader),
                                total=len(test_loader)):
            solver.feed_data(batch, need_HR=need_HR, need_landmark=False)

            # calculate forward time
            t0 = time.time()
            solver.test()
            t1 = time.time()
            total_time.append((t1 - t0))

            visuals = solver.get_current_visual(need_HR=need_HR)
            # Keep only the final output of the (possibly multi-stage) SR.
            sr_list.append(visuals['SR'][-1])

            # calculate PSNR/SSIM metrics on Python
            if need_HR:
                psnr, ssim = util.calc_metrics(visuals['SR'][-1],
                                               visuals['HR'],
                                               crop_border=scale)
                total_psnr.append(psnr)
                total_ssim.append(ssim)
                path_list.append(
                    os.path.basename(batch['HR_path'][0]).replace(
                        'HR', model_name))
                # print(
                #     "[%d/%d] %s || PSNR(dB)/SSIM: %.2f/%.4f || Timer: %.4f sec ."
                #     % (iter + 1, len(test_loader),
                #        os.path.basename(batch['HR_path'][0]), psnr, ssim,
                #        (t1 - t0)))
                res_dict[path_list[-1]] = {
                    'psnr': psnr,
                    'ssim': ssim,
                    'time': t1 - t0
                }

            else:
                path_list.append(os.path.basename(batch['LR_path'][0]))
                # print("[%d/%d] %s || Timer: %.4f sec ." %
                #       (iter + 1, len(test_loader),
                #        os.path.basename(batch['LR_path'][0]), (t1 - t0)))

        if need_HR:
            print("---- Average PSNR(dB) /SSIM /Speed(s) for [%s] ----" % bm)
            average_res_str = "PSNR: %.2f      SSIM: %.4f      Speed: %.4f" % \
                  (sum(total_psnr) / len(total_psnr), sum(total_ssim) /
                   len(total_ssim), sum(total_time) / len(total_time))
            print(average_res_str)
        else:
            print("---- Average Speed(s) for [%s] is %.4f sec ----" %
                  (bm, sum(total_time) / len(total_time)))

        # save SR results for further evaluation on MATLAB
        save_img_path = os.path.join(opt['path']['res_root'], bm)

        print("===> Saving SR images of [%s]... Save Path: [%s]\n" %
              (bm, save_img_path))

        if not os.path.exists(save_img_path): os.makedirs(save_img_path)
        for img, name in zip(sr_list, path_list):
            imageio.imwrite(os.path.join(save_img_path, name), img)
        # Per-image and average metrics are persisted only when HR
        # references existed (average_res_str is defined in that branch).
        if need_HR:
            with open(os.path.join(save_img_path, 'result.json'), 'w') as f:
                json.dump(res_dict, f, indent=2)
            with open(os.path.join(save_img_path, 'average_result.txt'),
                      'w') as f:
                f.write(average_res_str + '\n')

    print("==================================================")
    print("===> Finished !")