Example #1
0
    # Inference loop over the test dataset. Two modes, selected by input rank:
    #   - 4-D tensor in data['A'] ('single' dataset mode): whole-image
    #     inference, one saved result per sample.
    #   - otherwise ('single_video' dataset mode): block-wise video inference;
    #     spatial blocks are accumulated and stitched into full frames.
    # NOTE(review): `opt`, `model`, `visualizer`, `block_list`, `block_size`,
    # `now_deal_frame`, `cat_blocks` and `get_dataset_name` are defined outside
    # this excerpt — comments about them are inferred from usage here.
    for i, data in tqdm(enumerate(dataset)):
        if i >= opt.num_test:
            break  # stop after the configured number of test samples
        if len(data['A'].shape) == 4:  # image
            assert opt.dataset_mode == 'single'
            model.set_input(data)  # unpack data from data loader
            model.test()  # run inference
            visuals = model.get_current_visuals()  # get image/video results
            A_paths, _ = model.get_image_paths()  # get image/video paths
            # The kept file name carries a block-locator suffix,
            # e.g. xxxxxx__0__1.jpg, where __0__1 identifies the block
            # position within the full image.
            file_name_with_suffix = os.path.basename(
                A_paths[0])  # xxxxxx__0__1.jpg     __0__1 help to locate block
            visualizer.save_for_apply(visuals, file_name_with_suffix, i)
        else:  # video
            assert opt.dataset_mode == 'single_video'
            model.set_input(data)
            model.test()
            visuals = model.get_current_visuals()  # get image/video results
            # Accumulate per-block outputs on CPU until one full frame's
            # worth of blocks (rows * cols) has been gathered.
            block_list.append(visuals['HR_G'].cpu())
            if len(block_list) == block_size[0] * block_size[1]:
                A_paths, _ = model.get_image_paths()  # get image/video paths
                video_name = get_dataset_name(A_paths[0])
                # Stitch the accumulated blocks back into a single frame.
                visuals['HR_G'] = cat_blocks(block_list, block_size)
                block_list = []
                # Save under <video_name>/<zero-padded frame index>.
                visualizer.display_and_save(
                    visuals, os.path.join(video_name, '%.6d' % now_deal_frame))
                if data['end_flag']:
                    # Last frame of this video: reset the frame counter.
                    now_deal_frame = 0
                    print("video: {} is ok!".format(video_name))
                else:
                    now_deal_frame += 1
Example #2
0
                # NOTE(review): this excerpt begins mid-branch — `HR_list`,
                # `ensemble`, `LR`, `data`, `opt`, `model` and `visualizer`
                # are all bound outside the visible fragment.
                # Undo the test-time ensemble transforms, then average the
                # per-transform predictions into one output (over dim 0).
                HR_list = ensemble.ensemble_inverse(HR_list)
                HR = torch.cat(HR_list, dim=0)
                del HR_list  # release per-transform tensors before averaging
                HR = HR.mean(dim=0, keepdim=True)
                visuals = model.get_current_visuals()  # get image/video results
                visuals["HR_G"] = HR
                visuals["LR"] = LR

            else:
                # No ensemble: plain single-pass inference.
                model.set_input(data)  # unpack data from data loader
                model.test(compute_visual_flag=True)  # run inference
                visuals = model.get_current_visuals()  # get image/video results

            A_paths, B_paths = model.get_image_paths()  # get image/video paths
            file_name = get_file_name(A_paths[0])
            visualizer.display_and_save(visuals, file_name)
            # Presumably computes image-quality metrics — verify helper.
            visualizer.cal_iqa(visuals, file_name)

        # video
        # NOTE(review): this `elif` has no matching `if` visible in the
        # excerpt, and it follows dedented statements — the fragment appears
        # truncated or spliced together; confirm against the full file.
        elif len(data['A'].shape) == 5:
            if opt.ensemble:
                raise NotImplementedError("for video, we did not implement ensemble")
            else:
                remove_first = opt.remove_first
                remove_last = opt.remove_last
                LR = data['A']
                HR = data['B']
                HR_G_list = []
                HR_bicubic_list = []
                LR_list = []
                # Must have frames remaining after trimming both ends.
                assert LR.shape[1] > remove_first + remove_last
                # NOTE(review): from here on the code reads like a *training*
                # loop (optimize_parameters, loss logging) rather than the
                # inference path above — likely an unrelated snippet fused in
                # by the extractor; do not assume continuity with L35-L65.
                t_data = iter_start_time - iter_data_time  # Look at the end of the for loop, you will know...
                t_data = t_data / opt.batch_size  # per-sample data-loading time

            total_iters += opt.batch_size
            epoch_iter += opt.batch_size
            model.set_input(
                data)  # unpack data from dataset and apply preprocessing
            model.nowepoch = epoch
            model.optimize_parameters(
            )  # calculate loss functions, get gradients, update network weights
            '''visualize'''
            if total_iters % opt.display_freq == 0:  # display images on visdom
                model.compute_visuals(
                    dataset
                )  # Calculate additional output images for visualization if you need
                visualizer.display_and_save(model.get_current_visuals(), epoch)
            '''loss'''
            # Log every print_freq iters; early in training (first 5000
            # iters) also log every 10 batches for finer-grained feedback.
            if total_iters % opt.print_freq == 0 or (
                    total_iters <= 5000 and total_iters %
                (10 * opt.batch_size) == 0
            ):  # print training losses and save logging information to the disk
                losses = model.get_current_losses()
                # Per-sample compute time for this iteration.
                t_comp = (time.time() - iter_start_time) / opt.batch_size
                visualizer.print_and_save_current_losses(
                    epoch, epoch_iter, losses, t_comp, t_data)
                visualizer.plot_current_losses(
                    epoch,
                    float(epoch_iter) / dataset_size, losses)

            iter_data_time = time.time()
        '''save model by epoch'''