Example #1
def main():
    # Parse the command line arguments
    opts = options.parse()
    # torch.backends.cudnn.benchmark = False
    device_ids = [0, 1]
    # if opts.scan_gpus == True:
    #     # Scan the available GPUs to find one currently using less than 10Mb
    #     device_ids = sr_utils.wait_for_GPU(10)
    #     available_gpus = str(device_ids)[1:-1]
    #     print("Using GPUs {}".format(available_gpus))
    #     os.environ['CUDA_VISIBLE_DEVICES']=str(available_gpus)
    #
    #
    # # If using synthetic data launch the subprocess that keeps creating sinograms
    # if opts.synthetic_data == True:
    #     print("Launching the sinogram creation sub process")
    #     p = subprocess.Popen(["python", "create_sinos.py"])
    #     while(os.path.isfile("training_data.npy") == False):
    #         print("waiting for training data...")
    #         time.sleep(5)

    # Create the Network trainer and train!
    # device_ids = None
    trainer = Network_Trainer(opts, device_ids)
    trainer.train()
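
The commented-out GPU scan above depends on a project helper, `sr_utils.wait_for_GPU`, whose implementation is not shown. A rough sketch of what such a helper might look like, polling nvidia-smi until a GPU reports memory usage below the given threshold (the function name, return type, and poll interval here are assumptions):

import subprocess
import time


def wait_for_gpu(max_used_mb):
    # Poll nvidia-smi until at least one GPU reports memory usage below
    # max_used_mb; return the list of matching device indices.
    while True:
        out = subprocess.check_output(
            ['nvidia-smi', '--query-gpu=index,memory.used',
             '--format=csv,noheader,nounits']).decode()
        free_ids = [int(line.split(',')[0])
                    for line in out.strip().splitlines()
                    if int(line.split(',')[1]) < max_used_mb]
        if free_ids:
            return free_ids
        time.sleep(10)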
Example #2
    def main(self):
        try:
            opts = options.parse()
        except (options.ParseError, options.MissingArgumentError):
            print("Usage: digestive rainforestapp/digestive [email protected]")
            exit(1)

        digestive = Digestive(opts.username, opts.repository, opts.emails)
        digestive.process()
Example #3
def main():
    args = options.parse()

    # Create necessary directories if they do not exist.
    for path in [HOME, CACHE]:
        if not os.path.exists(path):
            os.mkdir(path)

    # Determine the host from either a dns_name or ip_address provided as an argument.
    # 'host' is either args.ip_address, args.dns_name, or args.cidr_range
    #host = args.dns_name if args.dns_name else args.ip_address
    #if host is None: raise Exception('ERROR: no domain/dns name or ip address provided')

    # The api_url is specific to the type of API call (contingent on args.ip_address/cidr_range
    # or args.dns_name) and, for the dnsrecord API (--dns_name provided), further to A/MX records.
    # The results of each differ and require different JSON parsing.

    common_prefix = args.api_site
    common_suffix = ''.join(
        ('&apikey=', args.api_key, '&output=', args.api_output))
    out = ''  # keep 'out' bound for the print/write below even if no branch assigns it

    if args.ip_address:  # reverseip
        # Validate the input's form: ip_address must match an IP pattern.
        try:
            ipaddr(args.ip_address).group()
        except AttributeError:
            sys.stdout.write(
                'error: --ip-address requires a valid ip address. Please try again.\n'
            )
            sys.exit(1)

        # Construct the url needed for the reverseip API call.
        middle = ''.join(('reverseip/', '?host=', args.ip_address))
        url = middle.join((common_prefix, common_suffix))
        #try:
        out = reverseip_lookup(url, JSON_REVERSEIP_RESPONSE_FILE, args.keyword)
        #except ConnectionRefusedError: #noqa
        #    # Sleep? Try again? How many times?
        #    print('Connection refused.')
        #    return

    elif args.dns_name:  # dnsrecord
        middle = ''.join(
            ('dnsrecord/', '?domain=', args.dns_name, '&recordtype=', 'A'))
        url = middle.join((common_prefix, common_suffix))
        out = dnsrecord_lookup(url, args.dns_name,
                               JSON_DNSRECORD_RESPONSE_FILE, args.keyword)

    elif args.cidr_range:
        # Should be the same as ip_address (e.g., reverseip_lookup()), but run in a loop.
        #startip, endip = args.cidr_range[0], args.cidr_range[1] #noqa
        # cidrs = netaddr.iprange_to_cidrs(startip, endip)
        pass

    print(out)  # Testing.
    # Write to csv style output to OUTPUT_FILE.
    with open(OUTPUT_FILE, 'a') as o:
        o.write(out)
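
The `ipaddr(...).group()` call above implies a helper that returns a regex match object, or None on failure (hence the AttributeError handler). A minimal sketch under that assumption, using a simple dotted-quad pattern:

import re

IP_PATTERN = re.compile(r'^(?:\d{1,3}\.){3}\d{1,3}$')


def ipaddr(candidate):
    # Return a match object for a dotted-quad string, or None otherwise, so
    # callers can chain .group() and catch AttributeError on invalid input.
    return IP_PATTERN.match(candidate)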
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, help='Path to option YAML file.')
    parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    opt = option.parse(args.opt, is_train=True)
    opt = option.dict_to_nonedict(opt)
    dataset_opt = opt['datasets']['test']
    test_set = create_dataset(dataset_opt)
    test_loader = create_dataloader(test_set, dataset_opt, opt, None)
    
    model = Model(opt)
    
    if test_loader is not None:
        calc_lpips = PerceptualLossLPIPS()
        avg_ssim = avg_psnr = avg_lpips = 0
        print("Testing Starts!")
        idx = 0
        test_bar = tqdm(test_loader)
        for test_data in test_bar:
            idx += 1
            img_name = os.path.splitext(os.path.basename(test_data['LQ_path'][0]))[0]
            img_dir = '../test_results_' + opt['name']
            util.mkdir(img_dir)

            model.feed_data(test_data)
            model.test()

            visuals = model.get_current_visuals()
            sr_img = util.tensor2img(visuals['SR'])  # uint8
            gt_img = util.tensor2img(visuals['GT'])  # uint8
            lq_img = util.tensor2img(visuals['LQ'])  # uint8

            # Save SR images for reference
            save_sr_img_path = os.path.join(img_dir,
                                            '{:s}_sr.png'.format(img_name))
            util.save_img(sr_img, save_sr_img_path)

            gt_img = gt_img / 255.
            sr_img = sr_img / 255.
            lq_img = lq_img / 255.
            avg_psnr += util.calculate_psnr(sr_img * 255, gt_img * 255)
            avg_ssim += util.calculate_ssim(sr_img * 255, gt_img * 255)
            avg_lpips += calc_lpips(visuals['SR'], visuals['GT'])

        avg_psnr = avg_psnr / idx
        avg_ssim = avg_ssim / idx
        avg_lpips = avg_lpips / idx

        print('Test_Result_{:s} psnr: {:.4e} ssim: {:.4e} lpips: {:.4e}'.format(
            opt['name'], avg_psnr, avg_ssim, avg_lpips))
Example #5
def main():

    args = parse()
    print(args)
    model, data, loss_func, opti, vis = initialization(args)
    print(model, data, loss_func, opti, vis)

    learning_framework = learning.Learning(model, data, opti, loss_func, vis)

    learning_framework.train(10)
Example #6
def main():
    opt = options.parse()
    opt.top_k = 5
    opt.validation_ratio = 0.2

    ##########################################
    # train
    # clf = GradientBoostingClassifier(n_estimators=3000, learning_rate=0.05, max_depth=6, max_features='sqrt')
    # clf = RandomForestClassifier()
    clf = RandomForestClassifier(n_estimators=3500,
                                 criterion='entropy',
                                 max_depth=None,
                                 max_features='sqrt',
                                 min_samples_split=4,
                                 min_samples_leaf=2,
                                 n_jobs=4)

    count = 0
    test_predictions = None

    for i in range(10):
        dl = FeatureDataLoader(opt)
        x_train, y_train, x_valid, y_valid, x_test, test_ids = get_data(dl)

        clf.fit(x_train, y_train)
        clf_probs = clf.predict_proba(x_valid)[:, 1]
        score = log_loss(y_valid, clf_probs)
        test_predictions = clf.predict_proba(x_test)[:, 1]

        print(score)
        # if score < 0.44:
        #   print('{}:{}'.format(i, score))
        #   if test_predictions is None:
        #     test_predictions = clf.predict_proba(x_test)[:, 1]
        #   else:
        #     test_predictions += clf.predict_proba(x_test)[:, 1]
        #   count += 1
    #
    # test_predictions  = test_predictions / count

    with open('submission_random_forest.csv', 'w') as f:
        writer = csv.writer(f)
        # write the header
        writer.writerow(['id', 'cancer'])
        # write the content
        for row in zip(test_ids, test_predictions):
            writer.writerow(row)

    # print(log_loss(y_train, clf.predict_proba(x_train)))

    # print(score)
    # print(normalized_score)

    # test_predictions = clf.predict_proba(x_test)[:, 1]  # stray leftover; the submission file has already been written
Example #7
def parse_options(is_train=True):
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to options file.')

    args = parser.parse_args()
    opt = options.parse(args.opt, is_train=is_train)

    return opt
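
A typical call site for this helper might look like the following (hypothetical usage, not taken from the original project):

if __name__ == '__main__':
    # Parse -opt from the command line and hand the options dict to the
    # training entry point.
    opt = parse_options(is_train=True)
    print(opt)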
Example #8
def main():

    args = parse()
    print(args)
    dataloader, dataloader_vis, dataloader_vid, model, vis, lr_scheduler, optimizer = initialization(
        args)
    ################## training   #######################
    for epoch in range(100):
        epoch_loss = 0
        result = []
        result = np.array(result)
        model.train()
        for i_batch, sample_batched in enumerate(dataloader):
            batch_loss, result = pad_update(
                model,
                sample_batched,
                with_attention_flag=args.with_attention_flag,
                pad_flag=args.pad_flag)
            epoch_loss += batch_loss
            vis.plot_current_errors(
                epoch, i_batch * input_batch_size / len(kitti_dataset),
                batch_loss.data)
            print(epoch, '******', i_batch, '*******', batch_loss)
            optimizer.zero_grad()  # clear stale gradients before backprop
            batch_loss.backward()
            optimizer.step()
        data_length = len(kitti_dataset) // input_batch_size * input_batch_size
        lr_scheduler.step()

        with torch.no_grad():
            if epoch % args.check_period == 0:
                model.eval()
                vis_para.vis = vis
                vis_para.win_number = 0
                vis_para.title = 'training'
                train_error, train_loss = testing(dataloader_vis, vis_para)

                vis_para.win_number = 10
                vis_para.title = 'testing'
                eval_error, eval_loss = testing(dataloader_vid, vis_para)
                vis.plot_epoch_training_validing(epoch, train_loss, eval_loss)
                vis.plot_epoch_training_validing_2(epoch, train_error[1][7],
                                                   eval_error[1][7], 22)
                torch.save(
                    model.state_dict(), '../saved_model/model_' +
                    args.model_name + '_' + str(epoch).zfill(3) + '.pt')
Example #9
File: train.py | Project: wwhappylife/DAN
def main():
    #### setup options of three networks
    parser = argparse.ArgumentParser()
    parser.add_argument("-opt",
                        type=str,
                        help="Path to option YMAL file of Predictor.")
    parser.add_argument("--launcher",
                        choices=["none", "pytorch"],
                        default="none",
                        help="job launcher")
    parser.add_argument("--local_rank", type=int, default=0)
    args = parser.parse_args()
    opt = option.parse(args.opt, is_train=True)

    # convert to NoneDict, which returns None for missing keys
    opt = option.dict_to_nonedict(opt)
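    # (A NoneDict is typically a dict subclass whose __missing__ returns None,
    # so lookups like opt["missing_key"] yield None instead of raising
    # KeyError; the actual implementation lives in the project's option module.)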

    # choose small opt for SFTMD test, fill path of pre-trained model_F
    #### set random seed
    seed = opt["train"]["manual_seed"]
    if seed is None:
        seed = random.randint(1, 10000)
    util.set_random_seed(seed)

    # load the PCA matrix computed from a large batch of kernels
    print("load PCA matrix")
    pca_matrix = torch.load(opt["pca_matrix_path"],
                            map_location=lambda storage, loc: storage)
    print("PCA matrix shape: {}".format(pca_matrix.shape))

    #### distributed training settings
    if args.launcher == "none":  # disabled distributed training
        opt["dist"] = False
        opt["dist"] = False
        rank = -1
        print("Disabled distributed training.")
    else:
        opt["dist"] = True
        opt["dist"] = True
        init_dist()
        world_size = (
            torch.distributed.get_world_size()
        )  # Returns the number of processes in the current process group
        rank = torch.distributed.get_rank(
        )  # Returns the rank of current process group

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    ###### Predictor&Corrector train ######

    #### loading resume state if exists
    if opt["path"].get("resume_state", None):
        # distributed resuming: all load into default GPU
        device_id = torch.cuda.current_device()
        resume_state = torch.load(
            opt["path"]["resume_state"],
            map_location=lambda storage, loc: storage.cuda(device_id),
        )
        option.check_resume(opt, resume_state["iter"])  # check resume options
    else:
        resume_state = None

    #### mkdir and loggers
    if rank <= 0:  # normal training (rank -1) OR distributed training (rank 0-7)
        if resume_state is None:
            # Predictor path
            util.mkdir_and_rename(
                opt["path"]
                ["experiments_root"])  # rename experiment folder if exists
            util.mkdirs(
                (path for key, path in opt["path"].items()
                 if not key == "experiments_root"
                 and "pretrain_model" not in key and "resume" not in key))
            os.system("rm ./log")
            os.symlink(os.path.join(opt["path"]["experiments_root"], ".."),
                       "./log")

        # config loggers. Before it, the log will not work
        util.setup_logger(
            "base",
            opt["path"]["log"],
            "train_" + opt["name"],
            level=logging.INFO,
            screen=True,
            tofile=True,
        )
        util.setup_logger(
            "val",
            opt["path"]["log"],
            "val_" + opt["name"],
            level=logging.INFO,
            screen=True,
            tofile=True,
        )
        logger = logging.getLogger("base")
        logger.info(option.dict2str(opt))
        # tensorboard logger
        if opt["use_tb_logger"] and "debug" not in opt["name"]:
            version = float(torch.__version__[0:3])
            if version >= 1.1:  # PyTorch 1.1
                from torch.utils.tensorboard import SummaryWriter
            else:
                logger.info(
                    "You are using PyTorch {}. Tensorboard will use [tensorboardX]"
                    .format(version))
                from tensorboardX import SummaryWriter
            tb_logger = SummaryWriter(log_dir="log/tb_logger/" + opt["name"])
    else:
        util.setup_logger("base",
                          opt["path"]["log"],
                          "train",
                          level=logging.INFO,
                          screen=True)
        logger = logging.getLogger("base")


    #### create train and val dataloader
    dataset_ratio = 200  # enlarge the size of each epoch
    for phase, dataset_opt in opt["datasets"].items():
        if phase == "train":
            train_set = create_dataset(dataset_opt)
            train_size = int(
                math.ceil(len(train_set) / dataset_opt["batch_size"]))
            total_iters = int(opt["train"]["niter"])
            total_epochs = int(math.ceil(total_iters / train_size))
            if opt["dist"]:
                train_sampler = DistIterSampler(train_set, world_size, rank,
                                                dataset_ratio)
                total_epochs = int(
                    math.ceil(total_iters / (train_size * dataset_ratio)))
            else:
                train_sampler = None
            train_loader = create_dataloader(train_set, dataset_opt, opt,
                                             train_sampler)
            if rank <= 0:
                logger.info(
                    "Number of train images: {:,d}, iters: {:,d}".format(
                        len(train_set), train_size))
                logger.info("Total epochs needed: {:d} for iters {:,d}".format(
                    total_epochs, total_iters))
        elif phase == "val":
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt, opt, None)
            if rank <= 0:
                logger.info("Number of val images in [{:s}]: {:d}".format(
                    dataset_opt["name"], len(val_set)))
        else:
            raise NotImplementedError(
                "Phase [{:s}] is not recognized.".format(phase))
    assert train_loader is not None
    assert val_loader is not None

    #### create model
    model = create_model(opt)  # load pretrained model of SFTMD

    #### resume training
    if resume_state:
        logger.info("Resuming training from epoch: {}, iter: {}.".format(
            resume_state["epoch"], resume_state["iter"]))

        start_epoch = resume_state["epoch"]
        current_step = resume_state["iter"]
        model.resume_training(resume_state)  # handle optimizers and schedulers
    else:
        current_step = 0
        start_epoch = 0

    prepro = util.SRMDPreprocessing(
        opt["scale"],
        pca_matrix,
        random=True,
        para_input=opt["code_length"],
        kernel=opt["kernel_size"],
        noise=False,
        cuda=True,
        sig=None,
        sig_min=opt["sig_min"],
        sig_max=opt["sig_max"],
        rate_iso=1.0,
        scaling=3,
        rate_cln=0.2,
        noise_high=0.0,
    )
    #### training
    logger.info("Start training from epoch: {:d}, iter: {:d}".format(
        start_epoch, current_step))
    for epoch in range(start_epoch, total_epochs + 1):
        if opt["dist"]:
            train_sampler.set_epoch(epoch)
        for _, train_data in enumerate(train_loader):
            current_step += 1
            if current_step > total_iters:
                break
            #### preprocessing for LR_img and kernel map
            LR_img, ker_map = prepro(train_data["GT"])
            LR_img = (LR_img * 255).round() / 255
            #### training Predictor
            model.feed_data(LR_img, train_data["GT"], ker_map)
            model.optimize_parameters(current_step)
            model.update_learning_rate(current_step,
                                       warmup_iter=opt["train"]["warmup_iter"])
            visuals = model.get_current_visuals()

            #### log of model_P
            if current_step % opt["logger"]["print_freq"] == 0:
                logs = model.get_current_log()
                message = "Predictor <epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> ".format(
                    epoch, current_step, model.get_current_learning_rate())
                for k, v in logs.items():
                    message += "{:s}: {:.4e} ".format(k, v)
                    # tensorboard logger
                    if opt["use_tb_logger"] and "debug" not in opt["name"]:
                        if rank <= 0:
                            tb_logger.add_scalar(k, v, current_step)
                if rank <= 0:
                    logger.info(message)

            # validation, to produce ker_map_list(fake)
            if current_step % opt["train"]["val_freq"] == 0 and rank <= 0:
                avg_psnr = 0.0
                idx = 0
                for _, val_data in enumerate(val_loader):

                    # LR_img, ker_map = prepro(val_data['GT'])
                    LR_img = val_data["LQ"]
                    lr_img = util.tensor2img(
                        LR_img)  # save LR image for reference

                    # valid Predictor
                    model.feed_data(LR_img, val_data["GT"])
                    model.test()
                    visuals = model.get_current_visuals()

                    # Save images for reference
                    img_name = os.path.splitext(
                        os.path.basename(val_data["LQ_path"][0]))[0]
                    img_dir = os.path.join(opt["path"]["val_images"], img_name)
                    # img_dir = os.path.join(opt['path']['val_images'], str(current_step), '_', str(step))
                    util.mkdir(img_dir)
                    save_lr_path = os.path.join(img_dir,
                                                "{:s}_LR.png".format(img_name))
                    util.save_img(lr_img, save_lr_path)

                    sr_img = util.tensor2img(visuals["SR"])  # uint8
                    gt_img = util.tensor2img(visuals["GT"])  # uint8

                    save_img_path = os.path.join(
                        img_dir,
                        "{:s}_{:d}.png".format(img_name, current_step))
                    util.save_img(sr_img, save_img_path)

                    # calculate PSNR
                    crop_size = opt["scale"]
                    gt_img = gt_img / 255.0
                    sr_img = sr_img / 255.0
                    cropped_sr_img = sr_img[crop_size:-crop_size,
                                            crop_size:-crop_size, :]
                    cropped_gt_img = gt_img[crop_size:-crop_size,
                                            crop_size:-crop_size, :]

                    avg_psnr += util.calculate_psnr(cropped_sr_img * 255,
                                                    cropped_gt_img * 255)
                    idx += 1

                avg_psnr = avg_psnr / idx

                # log
                logger.info("# Validation # PSNR: {:.6f}".format(avg_psnr))
                logger_val = logging.getLogger("val")  # validation logger
                logger_val.info(
                    "<epoch:{:3d}, iter:{:8,d}> psnr: {:.6f}".format(
                        epoch, current_step, avg_psnr))
                # tensorboard logger
                if opt["use_tb_logger"] and "debug" not in opt["name"]:
                    tb_logger.add_scalar("psnr", avg_psnr, current_step)

            #### save models and training states
            if current_step % opt["logger"]["save_checkpoint_freq"] == 0:
                if rank <= 0:
                    logger.info("Saving models and training states.")
                    model.save(current_step)
                    model.save_training_state(epoch, current_step)

    if rank <= 0:
        logger.info("Saving the final model.")
        model.save("latest")
        logger.info("End of Predictor and Corrector training.")
    tb_logger.close()
Example #10
def main():
    # parameters and flags
    args = parse()
    valid_period = 5
    visualize_training_period = 5
    save_visualize_training_period = 5
    input_batch_size = args.batch_size
    finetune_flag = False
    coor_layer_flag = False
    use_gpu_flag = False
    data_balance_flag = False
    color_flag = False
    rpe_flag = False
    camera_parameter = [640, 180, 640, 640, 320, 90]
    image_size = (camera_parameter[1], camera_parameter[0])

    ################## init model###########################
    model = VONet.SPADVONet(coor_layer_flag=coor_layer_flag,
                            color_flag=color_flag)
    model = model.float()
    print(model)
    if use_gpu_flag:
        # uncomment this to use multi-gpu
        #model     = nn.DataParallel(model.cuda())
        model = model.cuda()
    if finetune_flag:
        model.load_state_dict(torch.load(args.model_load))
    optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0)
    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer,
                                               lr_lambda=LambdaLR(200, 0,
                                                                  50).step)
    print(optimizer)

    ################### load data####################
    # training data
    motion_files_path = args.motion_path
    path_files_path = args.image_list_path
    print(motion_files_path)
    print(path_files_path)
    # transform
    if color_flag:
        transforms_ = [
            transforms.Resize(image_size),
            transforms.ColorJitter(brightness=0.1,
                                   contrast=0.1,
                                   saturation=0.1,
                                   hue=0.1),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]
    else:
        transforms_ = [
            transforms.Resize(image_size),
            transforms.ColorJitter(brightness=0.1,
                                   contrast=0.1,
                                   saturation=0.1,
                                   hue=0.1),
            transforms.ToTensor()
        ]
    kitti_dataset = data_loader.SepeDataset(
        path_to_poses_files=motion_files_path,
        path_to_image_lists=path_files_path,
        transform_=transforms_,
        camera_parameter=camera_parameter,
        coor_layer_flag=coor_layer_flag,
        color_flag=color_flag)

    dataloader = DataLoader(kitti_dataset,
                            batch_size=input_batch_size,
                            shuffle=True,
                            num_workers=4,
                            drop_last=True)
    if data_balance_flag:
        print('data balance by prob')
        dataloader = DataLoader(kitti_dataset,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True,
                                sampler=kitti_dataset.sampler)
    else:
        print('no data balance')
    dataloader_vis = DataLoader(kitti_dataset,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True)
    # testing data
    motion_files_path_test = args.motion_path_test
    path_files_path_test = args.image_list_path_test
    print(motion_files_path_test)
    print(path_files_path_test)
    # transform
    if color_flag:
        transforms_ = [
            transforms.Resize(image_size),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]
    else:
        transforms_ = [transforms.Resize(image_size), transforms.ToTensor()]

    kitti_dataset_test = data_loader.SepeDataset(
        path_to_poses_files=motion_files_path_test,
        path_to_image_lists=path_files_path_test,
        transform_=transforms_,
        camera_parameter=camera_parameter,
        norm_flag=1,
        coor_layer_flag=coor_layer_flag,
        color_flag=color_flag)

    dataloader_vid = DataLoader(kitti_dataset_test,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True)
    print(len(kitti_dataset), len(kitti_dataset_test))

    training_loss_data = open(
        '../checkpoint/saved_result/' + args.model_name + '_training.loss',
        'a')
    testing_loss_data = open(
        '../checkpoint/saved_result/' + args.model_name + '_testing.loss', 'a')
    training_ate_data = open(
        '../checkpoint/saved_result/' + args.model_name + '_training.ate', 'a')
    testing_ate_data = open(
        '../checkpoint/saved_result/' + args.model_name + '_testing.ate', 'a')
    ################## training   #######################
    for epoch in range(101):
        epoch_loss = 0
        result = []
        result = np.array(result)
        model.train()
        for i_batch, sample_batched in enumerate(dataloader):
            batch_loss, result = pad_update(model,
                                            sample_batched,
                                            use_gpu_flag=use_gpu_flag)
            epoch_loss += batch_loss
            print(epoch, '******', i_batch, '/', len(dataloader), '*******',
                  batch_loss.item())
            optimizer.zero_grad()  # clear stale gradients before backprop
            batch_loss.backward()
            optimizer.step()
        data_length = len(kitti_dataset) // input_batch_size * input_batch_size
        epoch_loss_mean = epoch_loss * input_batch_size / data_length
        lr_scheduler.step()

        ####Visualization Path###############################################################
        with torch.no_grad():
            if epoch % valid_period == 0:
                model.eval()
                forward_visual_result = []
                ground_truth = []
                epoch_loss_visu = 0
                for i_batch, sample_batched in enumerate(dataloader_vis):
                    print('visu************** i_batch', i_batch,
                          '******************')
                    model.zero_grad()
                    batch_loss, result = pad_update(model,
                                                    sample_batched,
                                                    use_gpu_flag=use_gpu_flag)
                    #batch_loss.backward()
                    batch_loss.detach_()
                    training_loss_data.write(
                        str(batch_loss.cpu().data.tolist()) + '\n')
                    training_loss_data.flush()
                    epoch_loss_visu += batch_loss
                    temp_f = weighted_mean_motion(result)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                    if i_batch > 100:
                        break
                data_length = len(
                    kitti_dataset) // input_batch_size * input_batch_size
                forward_visual_result = forward_visual_result.reshape(
                    -1, 6) * kitti_dataset.motion_stds
                #forward_visual_result[:,no_motion_flag]=0
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset.motion_stds+kitti_dataset.motion_means
                ground_truth = ground_truth.reshape(
                    -1, 6) * kitti_dataset.motion_stds

                forward_visual_result_m = tf.eular2pose(forward_visual_result)
                ground_truth_m = tf.eular2pose(ground_truth)
                if rpe_flag:
                    rot_train, tra_train = evaluate.evaluate(
                        ground_truth_m, forward_visual_result_m)
                    training_ate_data.write(
                        str(np.mean(tra_train)) + ' ' +
                        str(np.mean(rot_train)) + '\n')
                    training_ate_data.flush()
                torch.save(
                    model.state_dict(), '../checkpoint/saved_model/model_' +
                    args.model_name + '_' + str(epoch).zfill(3) + '.pt')
                ####Validation Path###############################################################
                model.eval()
                forward_visual_result = []
                backward_visual_result = []
                ground_truth = []
                forward_visual_opti = []
                for i_batch, sample_batched in enumerate(dataloader_vid):
                    #opti vis
                    print('test************** i_batch', i_batch,
                          '******************')
                    batch_loss_eval, predicted_result_eval = pad_update(
                        model, sample_batched, use_gpu_flag=use_gpu_flag)
                    batch_loss_eval.detach_()
                    testing_loss_data.write(
                        str(batch_loss_eval.cpu().data.tolist()) + '\n')
                    testing_loss_data.flush()
                    temp_f = weighted_mean_motion(predicted_result_eval)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                data_length = len(
                    kitti_dataset_test) // input_batch_size * input_batch_size
                forward_visual_result = forward_visual_result.reshape(
                    data_length, 6) * kitti_dataset_test.motion_stds
                #forward_visual_result[:,no_motion_flag]=0
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset_test.motion_stds+kitti_dataset_test.motion_means
                ground_truth = ground_truth.reshape(
                    data_length, 6) * kitti_dataset_test.motion_stds

                forward_visual_result_m = tf.eular2pose(forward_visual_result)
                ground_truth_m = tf.eular2pose(ground_truth)
                plot_path([ground_truth_m, forward_visual_result_m], epoch,
                          args)
                #forward_visual_result_m = tf.ses2poses(forward_visual_result)
                #ground_truth_m          = tf.ses2poses(ground_truth)
                if rpe_flag:
                    rot_eval, tra_eval = evaluate.evaluate(
                        ground_truth_m, forward_visual_result_m)

                    testing_ate_data.write(
                        str(np.mean(tra_eval)) + ' ' + str(np.mean(rot_eval)) +
                        '\n')
                    testing_ate_data.flush()
Example #11
File: test_IKC.py | Project: zhwzhong/DAN
    default="options/test/SFTMD/test_SFTMD_x4.yml",
    help="Path to options YMAL file.",
)
parser.add_argument(
    "-opt_P",
    type=str,
    default="options/test/Predictor/test_Predictor_x4.yml",
    help="Path to options YMAL file.",
)
parser.add_argument(
    "-opt_C",
    type=str,
    default="options/test/Corrector/test_Corrector_x4.yml",
    help="Path to options YMAL file.",
)
args = parser.parse_args()
opt_F = option.parse(args.opt_F, is_train=False)
opt_P = option.parse(args.opt_P, is_train=False)
opt_C = option.parse(args.opt_C, is_train=False)

opt_F = option.dict_to_nonedict(opt_F)
opt_P = option.dict_to_nonedict(opt_P)
opt_C = option.dict_to_nonedict(opt_C)

#### mkdir and logger
util.mkdirs((path for key, path in opt_P["path"].items()
             if not key == "experiments_root" and "pretrain_model" not in key
             and "resume" not in key))
util.mkdirs((path for key, path in opt_C["path"].items()
             if not key == "experiments_root" and "pretrain_model" not in key
             and "resume" not in key))
Example #12
import pprint


def main(args):
    flist, opt = options.parse(__version__, args)

    print("options:")
    pprint.pprint(opt)
    pprint.pprint(flist)
Example #13
import argparse
import sys

import torch

import options as option

sys.path.insert(0, "../../")
import utils as util

parser = argparse.ArgumentParser()
parser.add_argument(
    "-opt",
    type=str,
    default="options/setting1/train/train_setting1_x4.yml",
    help="Path to options YMAL file.",
)
args = parser.parse_args()
opt = option.parse(args.opt, is_train=True)

setting = opt["degradation"]

is_iso = setting["rate_iso"] == 1

batch_ker = util.random_batch_kernel(
    batch=30000,
    l=setting["ksize"],
    sig_min=setting["sig_min"],
    sig_max=setting["sig_max"],
    rate_iso=setting["rate_iso"],
    random_disturb=setting["random_disturb"],
    tensor=False,
)
print("batch kernel shape: {}".format(batch_ker.shape))
Example #14
def main():

    # parameters and flags
    args = parse()
    valid_period = 5
    visualize_training_period = 5
    save_visualize_training_period = 5
    input_batch_size = args.batch_size
    finetune_flag = False
    coor_layer_flag = args.coor_layer_flag
    pad_flag = args.pad_flag
    with_attention_flag = args.with_attention_flag
    print(coor_layer_flag, pad_flag, with_attention_flag)
    use_gpu_flag = True
    #motion_flag = [2,4]
    motion_flag = [0, 1, 2, 3, 4, 5]
    data_balance_flag = False
    color_flag = False
    vis_flag = False
    ate_flag = True
    debug_flag = False
    args.color_flag = color_flag
    args.data_balance_flag = data_balance_flag
    args.coor_layer_flag = coor_layer_flag
    no_motion_flag = [d not in motion_flag for d in range(0, 6)]
    print(motion_flag, no_motion_flag)
    #camera_parameter=[450,180,225,225,225,90]
    #camera_parameter=[651,262,651,651,320,130]
    camera_parameter = [640, 180, 640, 640, 320, 90]
    image_size = (camera_parameter[1], camera_parameter[0])
    args.camera_parameter = camera_parameter
    args.image_size = image_size

    ################## init model###########################
    model = models.VONet.PADVONet(coor_layer_flag=coor_layer_flag,
                                  color_flag=color_flag)
    model = model.float()
    if use_gpu_flag:
        #model     = nn.DataParallel(model.cuda())
        model = model.cuda()
    if finetune_flag:
        model.load_state_dict(torch.load(args.model_load))
    optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0)
    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer,
                                               lr_lambda=LambdaLR(200, 0,
                                                                  50).step)
    print(optimizer)
    #ego_pre = ep.EgomotionPrediction()
    ################### load data####################
    dataloader, dataloader_vis, dataloader_vid = dataloader_init(args)

    epoch_loss_visu_mean = 0
    epoch_loss_eval_mean = 0
    if vis_flag:
        vis = visualizer.Visualizer(args.visdom_ip, args.visdom_port)
        print('vis', args.visdom_ip, args.visdom_port)
    training_loss_data = open(
        '../checkpoint/saved_result/' + args.model_name + '_training.loss',
        'a')
    testing_loss_data = open(
        '../checkpoint/saved_result/' + args.model_name + '_testing.loss', 'a')
    training_ate_data = open(
        '../checkpoint/saved_result/' + args.model_name + '_training.ate', 'a')
    testing_ate_data = open(
        '../checkpoint/saved_result/' + args.model_name + '_testing.ate', 'a')
    ################## training   #######################
    for epoch in range(101):
        epoch_loss = 0
        result = []
        result = np.array(result)
        model.train()
        for i_batch, sample_batched in enumerate(dataloader):
            batch_loss, result = pad_update(
                model,
                sample_batched,
                with_attention_flag=with_attention_flag,
                pad_flag=pad_flag,
                motion_flag=motion_flag)
            #att_0 = result[1]
            #vis.plot_heat_map(att_0[0,0,:,:])
            epoch_loss += batch_loss
            if vis_flag:
                vis.plot_current_errors(
                    epoch, i_batch * input_batch_size / len(kitti_dataset),
                    batch_loss.data)
            print(epoch, '******', i_batch, '/', len(dataloader), '*******',
                  batch_loss.item())
            optimizer.zero_grad()  # clear stale gradients before backprop
            batch_loss.backward()
            optimizer.step()
            if debug_flag:
                if i_batch > 10:
                    break
        epoch_loss_mean = epoch_loss / len(dataloader)
        if vis_flag:
            vis.plot_epoch_current_errors(epoch, epoch_loss_mean.data)
        lr_scheduler.step()

        ####Visualization Path###############################################################
        with torch.no_grad():
            if epoch % valid_period == 0:
                model.eval()
                forward_visual_result = []
                ground_truth = []
                epoch_loss_visu = 0
                for i_batch, sample_batched in enumerate(dataloader_vis):
                    print('visu************** i_batch', i_batch,
                          '******************')
                    model.zero_grad()
                    batch_loss, result = pad_update(
                        model,
                        sample_batched,
                        with_attention_flag=with_attention_flag,
                        pad_flag=pad_flag,
                        motion_flag=motion_flag)

                    att_0 = result[1]
                    if vis_flag:
                        vis.plot_heat_map(att_0[0, 0, :, :])
                    #batch_loss.backward()
                    batch_loss.detach_()
                    training_loss_data.write(
                        str(batch_loss.cpu().data.tolist()) + '\n')
                    training_loss_data.flush()
                    epoch_loss_visu += batch_loss
                    temp_f = weighted_mean_motion(result, with_attention_flag)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                    if debug_flag:
                        if i_batch * input_batch_size > 1500:
                            break
                data_length = len(dataloader_vis) * input_batch_size
                epoch_loss_visu_mean = epoch_loss_visu / len(dataloader_vis)
                forward_visual_result = forward_visual_result.reshape(
                    -1, 6) * dataloader_vis.dataset.motion_stds
                forward_visual_result[:, no_motion_flag] = 0
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset.motion_stds+kitti_dataset.motion_means
                ground_truth = ground_truth.reshape(
                    -1, 6) * dataloader_vis.dataset.motion_stds

                forward_visual_result_m = tf.eular2pose(forward_visual_result)
                ground_truth_m = tf.eular2pose(ground_truth)
                #forward_visual_result_m = tf.ses2poses(forward_visual_result)
                #ground_truth_m          = tf.ses2poses(ground_truth)
                if ate_flag:
                    rot_train, tra_train = evaluate.evaluate(
                        ground_truth_m, forward_visual_result_m)
                    training_ate_data.write(
                        str(np.mean(tra_train)) + ' ' +
                        str(np.mean(rot_train)) + '\n')
                    training_ate_data.flush()
                if vis_flag:
                    vis.plot_path_with_gt(forward_visual_result_m,
                                          ground_truth_m, 5,
                                          'training set forward')
                torch.save(
                    model.state_dict(), '../checkpoint/saved_model/model_' +
                    args.model_name + '_' + str(epoch).zfill(3) + '.pt')
                ####Validation Path###############################################################
                model.eval()
                forward_visual_result = []
                backward_visual_result = []
                ground_truth = []
                epoch_loss_eval = 0
                forward_visual_opti = []
                for i_batch, sample_batched in enumerate(dataloader_vid):
                    #opti vis
                    print('test************** i_batch', i_batch,
                          '******************')
                    batch_loss_eval, predicted_result_eval = pad_update(
                        model,
                        sample_batched,
                        with_attention_flag=with_attention_flag,
                        pad_flag=pad_flag,
                        motion_flag=motion_flag)
                    batch_loss_eval.detach_()
                    testing_loss_data.write(
                        str(batch_loss_eval.cpu().data.tolist()) + '\n')
                    testing_loss_data.flush()
                    epoch_loss_eval += batch_loss_eval
                    temp_f = weighted_mean_motion(predicted_result_eval,
                                                  with_attention_flag)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                    if debug_flag:
                        if i_batch * input_batch_size > 1500:
                            break
                data_length = len(dataloader_vid) * input_batch_size
                epoch_loss_eval_mean = epoch_loss_eval / len(dataloader_vid)
                forward_visual_result = forward_visual_result.reshape(
                    -1, 6) * dataloader_vid.dataset.motion_stds
                forward_visual_result[:, no_motion_flag] = 0
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset_test.motion_stds+kitti_dataset_test.motion_means
                ground_truth = ground_truth.reshape(
                    -1, 6) * dataloader_vid.dataset.motion_stds

                forward_visual_result_m = tf.eular2pose(forward_visual_result)
                ground_truth_m = tf.eular2pose(ground_truth)
                #forward_visual_result_m = tf.ses2poses(forward_visual_result)
                #ground_truth_m          = tf.ses2poses(ground_truth)
                rpe = None
                if ate_flag:
                    rot_eval, tra_eval = evaluate.evaluate(
                        ground_truth_m, forward_visual_result_m)
                    testing_ate_data.write(
                        str(np.mean(tra_eval)) + ' ' + str(np.mean(rot_eval)) +
                        '\n')
                    testing_ate_data.flush()
                    rpe = str(np.mean(rot_eval)) + ' ' + str(np.mean(tra_eval))
                if vis_flag:
                    vis.plot_two_path_with_gt(forward_visual_result_m,
                                              forward_visual_result_m,
                                              ground_truth_m, 10,
                                              'testing set forward')
                    vis.plot_epoch_training_validing(
                        epoch,
                        epoch_loss_visu_mean.detach().cpu().numpy(),
                        epoch_loss_eval_mean.detach().cpu().numpy())
                    if ate_flag:
                        vis.plot_epoch_training_validing_2(
                            epoch, np.mean(tra_train), np.mean(tra_eval), 22)

                plot_path([ground_truth_m, forward_visual_result_m], epoch,
                          args, rpe)
Example #15
File: run-gradio.py | Project: marouenbg/dan
import argparse
import os
import os.path as osp

import gradio as gr
import numpy as np
import torch
from PIL import Image

# Project-local imports are assumed to be in scope in the original repo:
# `option` (config parsing), `create_model`, and `tensor2img`.

os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

#### options
parser = argparse.ArgumentParser()
parser.add_argument("-opt",
                    type=str,
                    default="test_setting2.yml",
                    help="Path to options YMAL file.")
parser.add_argument("-input_dir", type=str, default="../../../data_samples/")
parser.add_argument("-output_dir", type=str, default="../../../data_samples/")
args = parser.parse_args()
opt = option.parse(args.opt, is_train=False)
opt = option.dict_to_nonedict(opt)
model = create_model(opt)

if not osp.exists(args.output_dir):
    os.makedirs(args.output_dir)


def predict(img):
    # img = cv2.imread(args.input_dir)[:, :, [2, 1, 0]]
    img = img.transpose(2, 0, 1)[None] / 255
    img_t = torch.as_tensor(np.ascontiguousarray(img)).float()
    model.feed_data(img_t)
    model.test()
    sr = model.fake_SR.detach().float().cpu()[0]
    sr_im = tensor2img(sr)
    return sr_im  # return the SR image so a caller/UI can display it
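
The snippet ends before any Gradio interface is constructed; a minimal sketch of the missing wiring, assuming `predict` returns the super-resolved image (see the return added above):

# Hypothetical wiring, not shown in the original file: expose predict() as a
# simple image-to-image demo.
demo = gr.Interface(fn=predict, inputs='image', outputs='image')
demo.launch()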
Example #16
def main():
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, required=True,
                        help='Path to options file.')
    opt = options.parse(parser.parse_args().opt, is_train=False)
    util.mkdirs(
        (path for key, path in opt['path'].items() if not key == 'pretrain_model_G'))
    opt = options.dict_to_nonedict(opt)

    util.setup_logger(None, opt['path']['log'],
                      'test.log', level=logging.INFO, screen=True)
    logger = logging.getLogger('base')
    logger.info(options.dict2str(opt))
    # Create test dataset and dataloader
    test_loaders = []
    znorm = False  # TMP
    for phase, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        logger.info('Number of test images in [{:s}]: {:d}'.format(
            dataset_opt['name'], len(test_set)))
        test_loaders.append(test_loader)
        # Temporary: turns znorm on for all datasets if any one of them sets it; a
        # per-dataset variable will be needed to differentiate them later in the loop.
        if dataset_opt['znorm'] and not znorm:
            znorm = True

    # Create model
    model = create_model(opt)

    for test_loader in test_loaders:
        test_set_name = test_loader.dataset.opt['name']
        logger.info('\nTesting [{:s}]...'.format(test_set_name))
        test_start_time = time.time()
        dataset_dir = os.path.join(opt['path']['results_root'], test_set_name)
        util.mkdir(dataset_dir)

        test_results = OrderedDict()
        test_results['psnr'] = []
        test_results['ssim'] = []
        test_results['psnr_y'] = []
        test_results['ssim_y'] = []

        for data in test_loader:
            need_HR = test_loader.dataset.opt['dataroot_HR'] is not None

            model.feed_data(data, need_HR=need_HR)
            img_path = data['in_path'][0]
            img_name = os.path.splitext(os.path.basename(img_path))[0]

            model.test()  # test
            visuals = model.get_current_visuals(need_HR=need_HR)

            # if znorm the image range is [-1,1]; default image range is [0,1]
            top_img = tensor2np(visuals['top_fake'])  # uint8
            bot_img = tensor2np(visuals['bottom_fake'])  # uint8

            # save images
            suffix = opt['suffix']
            if suffix:
                save_img_path = os.path.join(
                    dataset_dir, img_name + suffix)
            else:
                save_img_path = os.path.join(dataset_dir, img_name)
            util.save_img(top_img, save_img_path + '_top.png')
            util.save_img(bot_img, save_img_path + '_bot.png')


            #TODO: update to use metrics functions
            # calculate PSNR and SSIM
            if need_HR:
                # if znorm the image range is [-1,1]; default image range is [0,1]
                gt_img = tensor2img(visuals['HR'], denormalize=znorm)  # uint8
                gt_img = gt_img / 255.
                # NOTE: 'sr_img' is never assigned in this snippet (only
                # 'top_img'/'bot_img' are produced above); the metrics below
                # appear to be carried over from a single-output test script.
                sr_img = sr_img / 255.

                crop_border = test_loader.dataset.opt['scale']
                cropped_sr_img = sr_img[crop_border:-crop_border,
                                        crop_border:-crop_border, :]
                cropped_gt_img = gt_img[crop_border:-crop_border,
                                        crop_border:-crop_border, :]

                psnr = util.calculate_psnr(
                    cropped_sr_img * 255, cropped_gt_img * 255)
                ssim = util.calculate_ssim(
                    cropped_sr_img * 255, cropped_gt_img * 255)
                test_results['psnr'].append(psnr)
                test_results['ssim'].append(ssim)

                if gt_img.shape[2] == 3:  # RGB image
                    sr_img_y = bgr2ycbcr(sr_img, only_y=True)
                    gt_img_y = bgr2ycbcr(gt_img, only_y=True)
                    cropped_sr_img_y = sr_img_y[crop_border:-crop_border,
                                                crop_border:-crop_border]
                    cropped_gt_img_y = gt_img_y[crop_border:-crop_border,
                                                crop_border:-crop_border]
                    psnr_y = util.calculate_psnr(
                        cropped_sr_img_y * 255, cropped_gt_img_y * 255)
                    ssim_y = util.calculate_ssim(
                        cropped_sr_img_y * 255, cropped_gt_img_y * 255)
                    test_results['psnr_y'].append(psnr_y)
                    test_results['ssim_y'].append(ssim_y)
                    logger.info('{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}; PSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}.'
                                .format(img_name, psnr, ssim, psnr_y, ssim_y))
                else:
                    logger.info(
                        '{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}.'.format(img_name, psnr, ssim))
            else:
                logger.info(img_name)

        #TODO: update to use metrics functions
        if need_HR:  # metrics
            # Average PSNR/SSIM results
            ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
            ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
            logger.info('----Average PSNR/SSIM results for {}----\n\tPSNR: {:.6f} dB; SSIM: {:.6f}\n'
                        .format(test_set_name, ave_psnr, ave_ssim))
            if test_results['psnr_y'] and test_results['ssim_y']:
                ave_psnr_y = sum(
                    test_results['psnr_y']) / len(test_results['psnr_y'])
                ave_ssim_y = sum(
                    test_results['ssim_y']) / len(test_results['ssim_y'])
                logger.info('----Y channel, average PSNR/SSIM----\n\tPSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}\n'
                            .format(ave_psnr_y, ave_ssim_y))
Example #17
    # weighted average
    att_temp_f_e = -att_temp_f * np.exp(att_temp_f)
    # att_temp_f_e = np.exp(att_temp_f)
    # print(temp_f.shape, att_temp_f_e.shape)
    temp_f_w = temp_f * att_temp_f_e
    # print('temp_f_w', temp_f_w)
    temp_f_w_s = np.sum(np.sum(temp_f_w, 2), 2)
    att_temp_s = np.sum(np.sum(att_temp_f_e, 2), 2)
    temp_f = temp_f_w_s / att_temp_s
    # print('temp', temp_f.shape)
    return temp_f
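
# A quick sanity check of the attention-weighted average above; the shapes
# here are assumptions inferred from the double sum over the last two axes.
import numpy as np

_temp_f = np.random.randn(2, 6, 4, 5)   # per-pixel 6-DoF motion estimates
_att = np.random.randn(2, 6, 4, 5)      # matching attention map
_w = -_att * np.exp(_att)
_pooled = np.sum(np.sum(_temp_f * _w, 2), 2) / np.sum(np.sum(_w, 2), 2)
print(_pooled.shape)  # -> (2, 6): one pooled motion vector per sample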


if __name__ == '__main__':
    args = parse()
    valid_period = 5
    visualize_training_period = 5
    save_visualize_training_period = 5
    input_batch_size = args.batch_size
    finetune_flag = True
    use_gpu_flag = True
    with_attention_flag = True
    coor_layer_flag = True
    no_pad_flag = False
    ################## init model###########################
    model = models.VONet.PADVOFeature(coor_layer_flag=coor_layer_flag)
    model = model.float()
    # normalization parameter
    # model and optimization
    if use_gpu_flag:
Example #18
def main():
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, required=True, help='Path to options file.')
    opt = options.parse(parser.parse_args().opt, is_train=False)
    util.mkdirs((path for key, path in opt['path'].items() if not key == 'pretrain_model_G'))

    logger = util.get_root_logger(None, opt['path']['log'], 'test.log', level=logging.INFO, screen=True)
    logger = logging.getLogger('base')
    logger.info(options.dict2str(opt))

    scale = opt.get('scale', 4)

    # Create test dataset and dataloader
    test_loaders = []
    znorm = False  # TMP
    # znorm_list = []

    '''
    video_list = os.listdir(cfg.testset_dir)
    for idx_video in range(len(video_list)):
        video_name = video_list[idx_video]
        # dataloader
        test_set = TestsetLoader(cfg, video_name)
        test_loader = DataLoader(test_set, num_workers=1, batch_size=1, shuffle=False)
    '''

    for phase, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        logger.info('Number of test images in [{:s}]: {:d}'.format(dataset_opt['name'], len(test_set)))
        test_loaders.append(test_loader)
        # Temporary: znorm is overwritten on each dataset in the loop; a
        # per-dataset variable will be needed to differentiate them later.
        # if dataset_opt.get('znorm') and znorm == False:
        #     znorm = True
        znorm = dataset_opt.get('znorm', False)
        # znorm_list.append(znorm)

    # Create model
    model = create_model(opt)

    for test_loader in test_loaders:
        test_set_name = test_loader.dataset.opt['name']
        logger.info('\nTesting [{:s}]...'.format(test_set_name))
        test_start_time = time.time()
        dataset_dir = os.path.join(opt['path']['results_root'], test_set_name)
        util.mkdir(dataset_dir)

        test_results = OrderedDict()
        test_results['psnr'] = []
        test_results['ssim'] = []
        test_results['psnr_y'] = []
        test_results['ssim_y'] = []

        for data in test_loader:
            need_HR = test_loader.dataset.opt['dataroot_HR'] is not None

            img_path = data['LR_path'][0]
            img_name = os.path.splitext(os.path.basename(img_path))[0]
            # tmp_vis(data['LR'][:,1,:,:,:], True)

            if opt.get('chop_forward', None):
                # data
                if len(data['LR'].size()) == 4:
                    b, n_frames, h_lr, w_lr = data['LR'].size()
                    LR_y_cube = data['LR'].view(b, -1, 1, h_lr, w_lr)  # b, t, c, h, w
                elif len(data['LR'].size()) == 5:  # for networks that work with 3 channel images
                    _, n_frames, _, _, _ = data['LR'].size()
                    LR_y_cube = data['LR']  # b, t, c, h, w

                # print(LR_y_cube.shape)
                # print(data['LR_bicubic'].shape)

                # crop borders to ensure each patch can be divisible by 2
                # TODO: this is modcrop, not sure if really needed, check (the dataloader already does modcrop)
                _, _, _, h, w = LR_y_cube.size()
                h = int(h // 16) * 16
                w = int(w // 16) * 16
                LR_y_cube = LR_y_cube[:, :, :, :h, :w]
                if isinstance(data['LR_bicubic'], torch.Tensor):
                    # SR_cb = data['LR_bicubic'][:, 1, :, :][:, :, :h * scale, :w * scale]
                    SR_cb = data['LR_bicubic'][:, 1, :h * scale, :w * scale]
                    # SR_cr = data['LR_bicubic'][:, 2, :, :][:, :, :h * scale, :w * scale]
                    SR_cr = data['LR_bicubic'][:, 2, :h * scale, :w * scale]

                SR_y = chop_forward(LR_y_cube, model, scale, need_HR=need_HR).squeeze(0)
                # SR_y = np.array(SR_y.data.cpu())
                if test_loader.dataset.opt.get('srcolors', None):
                    print(SR_y.shape, SR_cb.shape, SR_cr.shape)
                    sr_img = ycbcr_to_rgb(torch.stack((SR_y, SR_cb, SR_cr), -3))
                else:
                    sr_img = SR_y
            else:
                # data
                model.feed_data(data, need_HR=need_HR)
                # SR_y = net(LR_y_cube).squeeze(0)
                model.test()  # test
                visuals = model.get_current_visuals(need_HR=need_HR)
                # ds = torch.nn.AvgPool2d(2, stride=2, count_include_pad=False)
                # tmp_vis(ds(visuals['SR']), True)
                # tmp_vis(visuals['SR'], True)
                if test_loader.dataset.opt.get('y_only', None) and test_loader.dataset.opt.get('srcolors', None):
                    SR_cb = data['LR_bicubic'][:, 1, :, :]
                    SR_cr = data['LR_bicubic'][:, 2, :, :]
                    # tmp_vis(ds(SR_cb), True)
                    # tmp_vis(ds(SR_cr), True)
                    sr_img = ycbcr_to_rgb(torch.stack((visuals['SR'], SR_cb, SR_cr), -3))
                else:
                    sr_img = visuals['SR']

            # if znorm is enabled the image range is [-1, 1]; by default it is [0, 1].
            # In testing, each "dataset" can have an arbitrary name (not just train/val).
            sr_img = tensor2np(sr_img, denormalize=znorm)  # uint8

            # save images
            suffix = opt['suffix']
            if suffix:
                save_img_path = os.path.join(dataset_dir, img_name + suffix + '.png')
            else:
                save_img_path = os.path.join(dataset_dir, img_name + '.png')
            util.save_img(sr_img, save_img_path)

            # TODO: update to use metrics functions
            # calculate PSNR and SSIM
            if need_HR:
                # if znorm is enabled the image range is [-1, 1]; by default it is [0, 1]
                gt_img = tensor2img(visuals['HR'], denormalize=znorm)  # uint8
                gt_img = gt_img / 255.
                sr_img = sr_img / 255.

                crop_border = test_loader.dataset.opt['scale']
                cropped_sr_img = sr_img[crop_border:-crop_border, crop_border:-crop_border, :]
                cropped_gt_img = gt_img[crop_border:-crop_border, crop_border:-crop_border, :]

                psnr = util.calculate_psnr(cropped_sr_img * 255, cropped_gt_img * 255)
                ssim = util.calculate_ssim(cropped_sr_img * 255, cropped_gt_img * 255)
                test_results['psnr'].append(psnr)
                test_results['ssim'].append(ssim)

                if gt_img.shape[2] == 3:  # RGB image
                    sr_img_y = bgr2ycbcr(sr_img, only_y=True)
                    gt_img_y = bgr2ycbcr(gt_img, only_y=True)
                    cropped_sr_img_y = sr_img_y[crop_border:-crop_border, crop_border:-crop_border]
                    cropped_gt_img_y = gt_img_y[crop_border:-crop_border, crop_border:-crop_border]
                    psnr_y = util.calculate_psnr(cropped_sr_img_y * 255, cropped_gt_img_y * 255)
                    ssim_y = util.calculate_ssim(cropped_sr_img_y * 255, cropped_gt_img_y * 255)
                    test_results['psnr_y'].append(psnr_y)
                    test_results['ssim_y'].append(ssim_y)
                    logger.info('{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}; PSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}.' \
                                .format(img_name, psnr, ssim, psnr_y, ssim_y))
                else:
                    logger.info('{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}.'.format(img_name, psnr, ssim))
            else:
                logger.info(img_name)

        # TODO: update to use metrics functions
        if need_HR:  # metrics
            # Average PSNR/SSIM results
            ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
            ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
            logger.info('----Average PSNR/SSIM results for {}----\n\tPSNR: {:.6f} dB; SSIM: {:.6f}\n' \
                        .format(test_set_name, ave_psnr, ave_ssim))
            if test_results['psnr_y'] and test_results['ssim_y']:
                ave_psnr_y = sum(test_results['psnr_y']) / len(test_results['psnr_y'])
                ave_ssim_y = sum(test_results['ssim_y']) / len(test_results['ssim_y'])
                logger.info('----Y channel, average PSNR/SSIM----\n\tPSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}\n' \
                            .format(ave_psnr_y, ave_ssim_y))
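
chop_forward() is used above but not defined in this snippet. A minimal sketch
of the usual quadrant-splitting pattern behind such helpers is shown below,
assuming a plain callable network that preserves the channel count (the model
above is instead a wrapper exposing feed_data()/test()); all names here are
illustrative, not the author's.

import torch

def chop_forward_sketch(x, net, scale, overlap=8):
    # Upscale a (B, C, H, W) tensor by running `net` on four overlapping
    # quadrants and stitching the results, bounding peak GPU memory.
    b, c, h, w = x.size()
    h_half, w_half = h // 2, w // 2
    h_size, w_size = h_half + overlap, w_half + overlap
    patches = [
        x[:, :, 0:h_size, 0:w_size],          # top-left
        x[:, :, 0:h_size, w - w_size:w],      # top-right
        x[:, :, h - h_size:h, 0:w_size],      # bottom-left
        x[:, :, h - h_size:h, w - w_size:w],  # bottom-right
    ]
    with torch.no_grad():
        outs = [net(p) for p in patches]
    H, W = h * scale, w * scale
    h2, w2 = h_half * scale, w_half * scale
    hs, ws = h_size * scale, w_size * scale
    out = x.new_zeros(b, c, H, W)
    out[:, :, 0:h2, 0:w2] = outs[0][:, :, 0:h2, 0:w2]
    out[:, :, 0:h2, w2:W] = outs[1][:, :, 0:h2, ws - (W - w2):ws]
    out[:, :, h2:H, 0:w2] = outs[2][:, :, hs - (H - h2):hs, 0:w2]
    out[:, :, h2:H, w2:W] = outs[3][:, :, hs - (H - h2):hs, ws - (W - w2):ws]
    return out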
Example #19
def main():

    # setting arguments and logger
    parser = argparse.ArgumentParser(description='Arguments')
    parser.add_argument('--opt',
                        type=str,
                        required=True,
                        help='path to json or yaml file')
    parser.add_argument('--name',
                        type=str,
                        required=True,
                        help='save_dir prefix name')
    parser.add_argument('--scale',
                        type=int,
                        required=True,
                        help='scale factor')
    parser.add_argument('--ps', type=int, default=None, help='patch size')
    parser.add_argument('--bs', type=int, default=None, help='batch_size')
    parser.add_argument('--lr', type=float, default=None, help='learning rate')
    parser.add_argument('--train_Y',
                        action='store_true',
                        default=False,
                        help='convert rgb to yuv and only train on Y channel')
    parser.add_argument('--gpu_ids',
                        type=str,
                        default=None,
                        help='which gpu to use')
    parser.add_argument('--use_chop',
                        action='store_true',
                        default=False,
                        help='whether to use split_forward in test phase')
    parser.add_argument('--pretrained',
                        default=None,
                        help='checkpoint path to resume from')

    args = parser.parse_args()
    args, lg = parse(args)

    # Tensorboard curve
    pretrained = args['solver']['pretrained']
    train_path = '../Tensorboard/train_{}'.format(args['name'])
    val_path = '../Tensorboard/val_{}'.format(args['name'])
    psnr_path = '../Tensorboard/psnr_{}'.format(args['name'])
    ssim_path = '../Tensorboard/ssim_{}'.format(args['name'])

    if pretrained is None:
        # starting fresh: clear any stale Tensorboard logs from a previous run
        for path in (train_path, val_path, psnr_path, ssim_path):
            if osp.exists(path):
                lg.info('Remove dir: [{}]'.format(path))
                shutil.rmtree(path, True)

    train_writer = SummaryWriter(train_path)
    val_writer = SummaryWriter(val_path)
    psnr_writer = SummaryWriter(psnr_path)
    ssim_writer = SummaryWriter(ssim_path)

    # random seed
    seed = args['solver']['manual_seed']
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloader
    for phase, dataset_opt in args['datasets'].items():
        if phase == 'train':
            train_dataset = create_dataset(dataset_opt)
            train_loader = create_loader(train_dataset, dataset_opt)
            length = len(train_dataset)
            lg.info(
                'Number of train images: [{}], iters each epoch: [{}]'.format(
                    length, len(train_loader)))
        elif phase == 'val':
            val_dataset = create_dataset(dataset_opt)
            val_loader = create_loader(val_dataset, dataset_opt)
            length = len(val_dataset)
            lg.info(
                'Number of val images: [{}], iters each epoch: [{}]'.format(
                    length, len(val_loader)))
        elif phase == 'test':
            test_dataset = create_dataset(dataset_opt)
            test_loader = create_loader(test_dataset, dataset_opt)
            length = len(test_dataset)
            lg.info(
                'Number of test images: [{}], iters each epoch: [{}]'.format(
                    length, len(test_loader)))

    # create solver
    solver = create_solver(args)

    # training prepare
    solver_log = solver.get_current_log()
    NUM_EPOCH = args['solver']['num_epochs']
    cur_iter = -1
    start_epoch = solver_log['epoch']
    scale = args['scale']
    lg.info('Start Training from [{}] Epoch'.format(start_epoch))
    print_freq = args['print']['print_freq']
    val_step = args['solver']['val_step']

    # training
    for epoch in range(start_epoch, NUM_EPOCH + 1):
        solver_log['epoch'] = epoch

        train_loss_list = []
        for iter, data in enumerate(train_loader):
            cur_iter += 1
            solver.feed_data(data)
            iter_loss = solver.optimize_step()
            train_loss_list.append(iter_loss)

            # show on screen
            if (cur_iter % print_freq) == 0:
                lg.info(
                    'Epoch: {:4} | iter: {:3} | train_loss: {:.4f} | lr: {}'.
                    format(epoch, iter, iter_loss,
                           solver.get_current_learning_rate()))

        train_loss = round(sum(train_loss_list) / len(train_loss_list), 4)
        train_writer.add_scalar('loss', train_loss, epoch)
        solver_log['train_records']['train_loss'].append(train_loss)
        solver_log['train_records']['lr'].append(
            solver.get_current_learning_rate())

        epoch_is_best = False

        if (epoch % val_step) == 0:
            # Validation
            lg.info('Start Validation...')
            pbar = ProgressBar(len(val_loader))
            psnr_list = []
            ssim_list = []
            val_loss_list = []

            for iter, data in enumerate(val_loader):
                solver.feed_data(data)
                loss = solver.test()
                val_loss_list.append(loss)

                # calculate evaluation metrics
                visuals = solver.get_current_visual(need_np=True)
                psnr, ssim = calc_metrics(visuals['SR'],
                                          visuals['HR'],
                                          crop_border=scale,
                                          test_Y=True)
                psnr_list.append(psnr)
                ssim_list.append(ssim)
                pbar.update('')

            # save image
            solver.save_current_visual(epoch)

            avg_psnr = round(sum(psnr_list) / len(psnr_list), 2)
            avg_ssim = round(sum(ssim_list) / len(ssim_list), 4)
            val_loss = round(sum(val_loss_list) / len(val_loss_list), 4)
            val_writer.add_scalar('loss', val_loss, epoch)
            psnr_writer.add_scalar('psnr', avg_psnr, epoch)
            ssim_writer.add_scalar('ssim', avg_ssim, epoch)

            solver_log['val_records']['val_loss'].append(val_loss)
            solver_log['val_records']['psnr'].append(avg_psnr)
            solver_log['val_records']['ssim'].append(avg_ssim)

            # record the best epoch
            if solver_log['best_pred'] < avg_psnr:
                solver_log['best_pred'] = avg_psnr
                epoch_is_best = True
                solver_log['best_epoch'] = epoch
            lg.info(
                'PSNR: {:.2f} | SSIM: {:.4f} | Loss: {:.4f} | Best_PSNR: {:.2f} in Epoch: [{}]'
                .format(avg_psnr, avg_ssim, val_loss, solver_log['best_pred'],
                        solver_log['best_epoch']))

        solver.set_current_log(solver_log)
        solver.save_checkpoint(epoch, epoch_is_best)
        solver.save_current_log()

        # update lr
        solver.update_learning_rate(epoch)

    lg.info('===> Finished !')
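
calc_metrics() above comes from the project's own metric utilities. As a rough
sketch of the PSNR half of what such a helper computes (the name, the border
crop, and the uint8 convention are assumptions based on how it is called):

import math
import numpy as np

def calc_psnr_sketch(sr, hr, crop_border):
    # drop the border pixels that SR evaluations conventionally exclude
    sr = sr[crop_border:-crop_border, crop_border:-crop_border].astype(np.float64)
    hr = hr[crop_border:-crop_border, crop_border:-crop_border].astype(np.float64)
    mse = np.mean((sr - hr) ** 2)
    if mse == 0:
        return float('inf')
    # PSNR in dB for 8-bit images (peak value 255)
    return 20 * math.log10(255.0 / math.sqrt(mse))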
Example #20
def main():

    # parameters and flags
    args = parse()
    valid_period = 5
    visualize_training_period = 5
    save_visualize_training_period = 5
    input_batch_size = args.batch_size
    finetune_flag = False
    coor_layer_flag = False
    pad_flag = False
    with_attention_flag = False
    rpw_flag = False
    use_gpu_flag = True
    vis_flag = False
    motion_flag = [2, 4]
    data_balance_flag = False
    no_motion_flag = [d not in motion_flag for d in range(0, 6)]
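    # e.g. motion_flag = [2, 4] -> no_motion_flag = [True, True, False, True, False, True]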
    print(motion_flag, no_motion_flag)
    #camera_parameter=[450,180,225,225,225,90]
    #camera_parameter=[651,262,651,651,320,130]
    camera_parameter = [640, 180, 640, 640, 320, 90]
    image_size = (camera_parameter[1], camera_parameter[0])

    ################## init model###########################
    model = models.VONet.PADVONet(coor_layer_flag=coor_layer_flag)
    model = model.float()
    if use_gpu_flag:
        #model     = nn.DataParallel(model.cuda())
        model = model.cuda()
        print(model)
    if finetune_flag:
        model.load_state_dict(torch.load(args.model_load))
    # with BN or GN layers in the model, the learning rate can be increased
    '''
    for name, child in model.named_children():
        if name[0:3]=='att':
           print(name + ' is unfrozen')
           for param in child.parameters():
               param.requires_grad = True
        else:
           print(name + ' is frozen')
           for param in child.parameters():
               param.requires_grad = False
    '''
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer,
                                               lr_lambda=LambdaLR(200, 0,
                                                                  50).step)
    print(optimizer)
    #ego_pre = ep.EgomotionPrediction()
    ################### load data####################
    # training data
    motion_files_path = args.motion_path
    path_files_path = args.image_list_path
    print(motion_files_path)
    print(path_files_path)
    # transform
    transforms_ = [
        transforms.Resize(image_size),
        transforms.ColorJitter(brightness=0.1,
                               contrast=0.1,
                               saturation=0.1,
                               hue=0.1),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
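    # Normalize(mean=0.5, std=0.5) maps ToTensor's [0, 1] output to [-1, 1]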
    kitti_dataset_t = dlr.SepeDataset(path_to_poses_files=motion_files_path,
                                      path_to_image_lists=path_files_path,
                                      transform_=transforms_,
                                      camera_parameter=camera_parameter,
                                      coor_layer_flag=coor_layer_flag)
    kitti_dataset_tv = dl.SepeDataset(path_to_poses_files=motion_files_path,
                                      path_to_image_lists=path_files_path,
                                      transform_=transforms_,
                                      camera_parameter=camera_parameter,
                                      coor_layer_flag=coor_layer_flag)

    #dataloader = DataLoader(kitti_dataset, batch_size=input_batch_size,shuffle=False ,num_workers=4,drop_last=True,sampler=kitti_dataset.sampler)
    dataloader = DataLoader(kitti_dataset_t,
                            batch_size=input_batch_size,
                            shuffle=True,
                            num_workers=4,
                            drop_last=True)
    if data_balance_flag:
        print('data balance by prob')
        dataloader = DataLoader(kitti_dataset_t,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True,
                                sampler=kitti_dataset_t.sampler)
    else:
        print('no data balance')
    dataloader_vis = DataLoader(kitti_dataset_tv,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True)
    # testing data
    motion_files_path_test = args.motion_path_test
    path_files_path_test = args.image_list_path_test
    print(motion_files_path_test)
    print(path_files_path_test)
    # transform
    transforms_ = [
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]

    kitti_dataset_test = dl.SepeDataset(
        path_to_poses_files=motion_files_path_test,
        path_to_image_lists=path_files_path_test,
        transform_=transforms_,
        camera_parameter=camera_parameter,
        norm_flag=1,
        coor_layer_flag=coor_layer_flag)

    dataloader_vid = DataLoader(kitti_dataset_test,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True)
    print(len(kitti_dataset_t), len(kitti_dataset_test))

    epoch_loss_visu_mean = 0
    epoch_loss_eval_mean = 0
    if vis_flag:
        vis = visualizer.Visualizer(args.visdom_ip, args.visdom_port)
        print('vis', args.visdom_ip, args.visdom_port)
    training_loss_data = open(
        '../saved_new/' + args.model_name + '_training.loss', 'a')
    testing_loss_data = open(
        '../saved_new/' + args.model_name + '_testing.loss', 'a')
    training_ate_data = open(
        '../saved_new/' + args.model_name + '_training.ate', 'a')
    testing_ate_data = open('../saved_new/' + args.model_name + '_testing.ate',
                            'a')
    ################## training   #######################
    for epoch in range(101):
        epoch_loss = 0
        result = np.array([])
        model.train()
        for i_batch, sample_batched in enumerate(dataloader):
            batch_loss, result = pad_update(
                model,
                sample_batched,
                with_attention_flag=with_attention_flag,
                pad_flag=pad_flag,
                motion_flag=motion_flag)
            #att_0 = result[1]
            #vis.plot_heat_map(att_0[0,0,:,:])
            epoch_loss += batch_loss
            if vis_flag:
                vis.plot_current_errors(
                    epoch, i_batch * input_batch_size / len(kitti_dataset_t),
                    batch_loss.data)
            print(epoch, '******', i_batch, '/', len(dataloader), '*******',
                  batch_loss.item())
            optimizer.zero_grad()  # assumed needed: pad_update is not shown, so this presumes it does not reset gradients itself
            batch_loss.backward()
            optimizer.step()
        data_length = len(
            kitti_dataset_t) // input_batch_size * input_batch_size
        epoch_loss_mean = epoch_loss * input_batch_size / data_length
        if vis_flag:
            vis.plot_epoch_current_errors(epoch, epoch_loss_mean.data)
        lr_scheduler.step()

        ####Visualization Path###############################################################
        with torch.no_grad():
            if epoch % valid_period == 0:
                model.eval()
                forward_visual_result = []
                ground_truth = []
                epoch_loss_visu = 0
                for i_batch, sample_batched in enumerate(dataloader_vis):
                    print('visu************** i_batch', i_batch,
                          '******************')
                    model.zero_grad()
                    batch_loss, result = pad_update(
                        model,
                        sample_batched,
                        with_attention_flag=with_attention_flag,
                        pad_flag=pad_flag,
                        motion_flag=motion_flag)

                    att_0 = result[1]
                    if vis_flag:
                        vis.plot_heat_map(att_0[0, 0, :, :])
                    #batch_loss.backward()
                    batch_loss.detach_()
                    training_loss_data.write(
                        str(batch_loss.cpu().data.tolist()) + '\n')
                    training_loss_data.flush()
                    epoch_loss_visu += batch_loss
                    temp_f = weighted_mean_motion(result, with_attention_flag)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                data_length = len(
                    kitti_dataset_tv) // input_batch_size * input_batch_size
                epoch_loss_visu_mean = epoch_loss_visu * input_batch_size / data_length
                forward_visual_result = forward_visual_result.reshape(
                    data_length, 6) * kitti_dataset_t.motion_stds
                forward_visual_result[:, no_motion_flag] = 0
                #forward_visual_result[:,2]=1
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset.motion_stds+kitti_dataset.motion_means
                ground_truth = ground_truth.reshape(
                    data_length, 6) * kitti_dataset_t.motion_stds

                #forward_visual_result_m = tf.ses2poses(forward_visual_result)
                #ground_truth_m          = tf.ses2poses(ground_truth)
                forward_visual_result_m = tf.eular2pose(forward_visual_result)
                ground_truth_m = tf.eular2pose(ground_truth)
                if vis_flag:
                    vis.plot_path_with_gt(forward_visual_result_m,
                                          ground_truth_m, 5,
                                          'training set forward')

                rot_train, tra_train = evaluate.evaluate(
                    ground_truth_m, forward_visual_result_m)
                training_ate_data.write(
                    str(np.mean(tra_train)) + ' ' + str(np.mean(rot_train)) +
                    '\n')
                training_ate_data.flush()
                torch.save(
                    model.state_dict(), '../saved_model/model_' +
                    args.model_name + '_' + str(epoch).zfill(3) + '.pt')
                ####Validation Path###############################################################
                model.eval()
                forward_visual_result = []
                backward_visual_result = []
                ground_truth = []
                epoch_loss_eval = 0
                forward_visual_opti = []
                for i_batch, sample_batched in enumerate(dataloader_vid):
                    #opti vis
                    print('test************** i_batch', i_batch,
                          '******************')
                    batch_loss_eval, predicted_result_eval = pad_update(
                        model,
                        sample_batched,
                        with_attention_flag=with_attention_flag,
                        pad_flag=pad_flag,
                        motion_flag=motion_flag)
                    batch_loss_eval.detach_()
                    testing_loss_data.write(
                        str(batch_loss_eval.cpu().data.tolist()) + '\n')
                    testing_loss_data.flush()
                    epoch_loss_eval += batch_loss_eval
                    temp_f = weighted_mean_motion(predicted_result_eval,
                                                  with_attention_flag)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                data_length = len(
                    kitti_dataset_test) // input_batch_size * input_batch_size
                epoch_loss_eval_mean = epoch_loss_eval * input_batch_size / data_length
                forward_visual_result = forward_visual_result.reshape(
                    data_length, 6) * kitti_dataset_test.motion_stds
                forward_visual_result[:, no_motion_flag] = 0
                #forward_visual_result[:,2]=1
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset_test.motion_stds+kitti_dataset_test.motion_means
                ground_truth = ground_truth.reshape(
                    data_length, 6) * kitti_dataset_test.motion_stds

                #forward_visual_result_m = tf.ses2poses(forward_visual_result)
                #ground_truth_m          = tf.ses2poses(ground_truth)

                forward_visual_result_m = tf.eular2pose(forward_visual_result)
                ground_truth_m = tf.eular2pose(ground_truth)
                rot_eval, tra_eval = evaluate.evaluate(
                    ground_truth_m, forward_visual_result_m)

                testing_ate_data.write(
                    str(np.mean(tra_eval)) + ' ' + str(np.mean(rot_eval)) +
                    '\n')
                testing_ate_data.flush()
                if vis_flag:
                    vis.plot_two_path_with_gt(forward_visual_result_m,
                                              forward_visual_result_m,
                                              ground_truth_m, 10,
                                              'testing set forward')
                    vis.plot_epoch_training_validing(
                        epoch,
                        epoch_loss_visu_mean.detach().cpu().numpy(),
                        epoch_loss_eval_mean.detach().cpu().numpy())
                    vis.plot_epoch_training_validing_2(epoch,
                                                       np.mean(tra_train),
                                                       np.mean(tra_eval), 22)
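
The LambdaLR helper handed to the scheduler above (constructed as
LambdaLR(200, 0, 50)) is not shown in this snippet. A common definition from
CycleGAN-style training code, which keeps the learning-rate factor at 1.0 and
then decays it linearly to zero, looks like the sketch below; treat it as an
assumption about the author's helper, not its actual source.

class LambdaLR:
    def __init__(self, n_epochs, offset, decay_start_epoch):
        # n_epochs: total epochs; offset: starting epoch;
        # decay_start_epoch: epoch at which the linear decay begins
        self.n_epochs = n_epochs
        self.offset = offset
        self.decay_start_epoch = decay_start_epoch

    def step(self, epoch):
        # multiplicative factor for the base lr: 1.0 before decay_start_epoch,
        # then a straight line down to 0.0 at epoch n_epochs
        return 1.0 - max(0, epoch + self.offset - self.decay_start_epoch) / \
               (self.n_epochs - self.decay_start_epoch)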
Example #21
def main():

    # parameters and flags
    args = parse()
    valid_period = 5
    visualize_training_period = 5
    save_visualize_training_period = 5
    input_batch_size = args.batch_size
    finetune_flag = False
    coor_layer_flag = False
    pad_flag = False
    with_attention_flag = False
    rpe_flag = True
    use_gpu_flag = True
    motion_flag = [0, 1, 2, 3, 4, 5]
    data_balance_flag = False
    no_motion_flag = [d not in motion_flag for d in range(0, 6)]
    print(motion_flag, no_motion_flag)
    #camera_parameter=[450,180,225,225,225,90]
    #camera_parameter=[651,262,651,651,320,130]
    camera_parameter = [640, 180, 640, 640, 320, 90]
    image_size = (camera_parameter[1], camera_parameter[0])

    ################## init model###########################
    vo_predictor = DCVO()
    #ego_pre = ep.EgomotionPrediction()
    ################### load data####################
    # training data
    motion_files_path = args.motion_path
    path_files_path = args.image_list_path
    print(motion_files_path)
    print(path_files_path)
    # transform
    transforms_ = [
        transforms.Resize(image_size),
        transforms.ColorJitter(brightness=0.1,
                               contrast=0.1,
                               saturation=0.1,
                               hue=0.1),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
    kitti_dataset = data.data_loader.SepeDataset(
        path_to_poses_files=motion_files_path,
        path_to_image_lists=path_files_path,
        transform_=transforms_,
        camera_parameter=camera_parameter,
        coor_layer_flag=coor_layer_flag)

    #dataloader = DataLoader(kitti_dataset, batch_size=input_batch_size,shuffle=False ,num_workers=4,drop_last=True,sampler=kitti_dataset.sampler)
    dataloader = DataLoader(kitti_dataset,
                            batch_size=input_batch_size,
                            shuffle=True,
                            num_workers=2,
                            drop_last=True)
    if data_balance_flag:
        print('data balance by prob')
        dataloader = DataLoader(kitti_dataset,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True,
                                sampler=kitti_dataset.sampler)
    else:
        print('no data balance')
    dataloader_vis = DataLoader(kitti_dataset,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True)
    # testing data
    motion_files_path_test = args.motion_path_test
    path_files_path_test = args.image_list_path_test
    print(motion_files_path_test)
    print(path_files_path_test)
    # transform
    transforms_ = [
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]

    kitti_dataset_test = data.data_loader.SepeDataset(
        path_to_poses_files=motion_files_path_test,
        path_to_image_lists=path_files_path_test,
        transform_=transforms_,
        camera_parameter=camera_parameter,
        norm_flag=1,
        coor_layer_flag=coor_layer_flag)

    dataloader_vid = DataLoader(kitti_dataset_test,
                                batch_size=input_batch_size,
                                shuffle=False,
                                num_workers=4,
                                drop_last=True)
    print(len(kitti_dataset), len(kitti_dataset_test))

    epoch_loss_visu_mean = 0
    epoch_loss_eval_mean = 0
    vis = visualizer.Visualizer(args.visdom_ip, args.visdom_port)
    print('vis', args.visdom_ip, args.visdom_port)
    training_loss_data = open(
        '../saved_new/' + args.model_name + '_training.loss', 'a')
    testing_loss_data = open(
        '../saved_new/' + args.model_name + '_testing.loss', 'a')
    training_ate_data = open(
        '../saved_new/' + args.model_name + '_training.ate', 'a')
    testing_ate_data = open('../saved_new/' + args.model_name + '_testing.ate',
                            'a')
    ################## training   #######################
    for epoch in range(101):
        epoch_loss = 0
        result = np.array([])
        vo_predictor.train()
        for i_batch, sample_batched in enumerate(dataloader):
            batch_loss, result = dc_update(
                vo_predictor,
                sample_batched,
                with_attention_flag=with_attention_flag,
                pad_flag=pad_flag,
                motion_flag=motion_flag)
            #att_0 = result[1]
            #vis.plot_heat_map(att_0[0,0,:,:])
            epoch_loss += batch_loss
            vis.plot_current_errors(
                epoch, i_batch * input_batch_size / len(kitti_dataset),
                batch_loss.data)
            print(epoch, '******', i_batch, '/', len(dataloader), '*******',
                  batch_loss.item())
            batch_loss.backward()
            vo_predictor.step()
        data_length = len(kitti_dataset) // input_batch_size * input_batch_size
        epoch_loss_mean = epoch_loss * input_batch_size / data_length
        vis.plot_epoch_current_errors(epoch, epoch_loss_mean.data)
        vo_predictor.lstep()

        ####Visualization Path###############################################################
        with torch.no_grad():
            if epoch % valid_period == 0:
                vo_predictor.eval()
                forward_visual_result = []
                ground_truth = []
                epoch_loss_visu = 0
                for i_batch, sample_batched in enumerate(dataloader_vis):
                    print('visu************** i_batch', i_batch,
                          '******************')
                    vo_predictor.zero_grad()
                    batch_loss, result = dc_update(
                        vo_predictor,
                        sample_batched,
                        with_attention_flag=with_attention_flag,
                        pad_flag=pad_flag,
                        motion_flag=motion_flag)

                    att_0 = result[1]
                    vis.plot_heat_map(att_0[0, 0, :, :])
                    #batch_loss.backward()
                    batch_loss.detach_()
                    training_loss_data.write(
                        str(batch_loss.cpu().data.tolist()) + '\n')
                    training_loss_data.flush()
                    epoch_loss_visu += batch_loss
                    temp_f = weighted_mean_motion(result, with_attention_flag)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                data_length = len(
                    kitti_dataset) // input_batch_size * input_batch_size
                epoch_loss_visu_mean = epoch_loss_visu * input_batch_size / data_length
                forward_visual_result = forward_visual_result.reshape(
                    data_length, 6) * kitti_dataset.motion_stds
                forward_visual_result[:, no_motion_flag] = 0
                if no_motion_flag[2]:
                    forward_visual_result[:, 2] = 1
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset.motion_stds+kitti_dataset.motion_means
                ground_truth = ground_truth.reshape(
                    data_length, 6) * kitti_dataset.motion_stds

                #forward_visual_result_m = tf.ses2poses(forward_visual_result)
                #ground_truth_m          = tf.ses2poses(ground_truth)
                forward_visual_result_m = tf.eular2pose2(
                    forward_visual_result, 1)
                ground_truth_m = tf.eular2pose2(ground_truth, 1)
                if rpe_flag:
                    rot_train, tra_train = evaluate.evaluate(
                        ground_truth_m, forward_visual_result_m)
                    training_ate_data.write(
                        str(np.mean(tra_train)) + ' ' +
                        str(np.mean(rot_train)) + '\n')
                    training_ate_data.flush()
                vis.plot_path_with_gt(forward_visual_result_m, ground_truth_m,
                                      5, 'training set forward')
                #torch.save(model.state_dict(), '../saved_model/model_'+args.model_name+'_'+str(epoch).zfill(3)+'.pt')
                vo_predictor.save(args.model_name, epoch)
                ####Validation Path###############################################################
                vo_predictor.eval()
                forward_visual_result = []
                backward_visual_result = []
                ground_truth = []
                epoch_loss_eval = 0
                forward_visual_opti = []
                for i_batch, sample_batched in enumerate(dataloader_vid):
                    #opti vis
                    print('test************** i_batch', i_batch,
                          '******************')
                    batch_loss_eval, predicted_result_eval = dc_update(
                        vo_predictor,
                        sample_batched,
                        with_attention_flag=with_attention_flag,
                        pad_flag=pad_flag,
                        motion_flag=motion_flag)
                    batch_loss_eval.detach_()
                    testing_loss_data.write(
                        str(batch_loss_eval.cpu().data.tolist()) + '\n')
                    testing_loss_data.flush()
                    epoch_loss_eval += batch_loss_eval
                    temp_f = weighted_mean_motion(predicted_result_eval,
                                                  with_attention_flag)
                    gt_f_12 = sample_batched['motion_f_01'].numpy()
                    forward_visual_result = np.append(forward_visual_result,
                                                      temp_f)
                    ground_truth = np.append(ground_truth, gt_f_12)
                data_length = len(
                    kitti_dataset_test) // input_batch_size * input_batch_size
                epoch_loss_eval_mean = epoch_loss_eval * input_batch_size / data_length
                forward_visual_result = forward_visual_result.reshape(
                    data_length, 6) * kitti_dataset_test.motion_stds
                forward_visual_result[:, no_motion_flag] = 0
                if no_motion_flag[2]:
                    forward_visual_result[:, 2] = 1
                #ground_truth = ground_truth.reshape(data_length,6)*kitti_dataset_test.motion_stds+kitti_dataset_test.motion_means
                ground_truth = ground_truth.reshape(
                    data_length, 6) * kitti_dataset_test.motion_stds

                #forward_visual_result_m = tf.ses2poses(forward_visual_result)
                #ground_truth_m          = tf.ses2poses(ground_truth)

                forward_visual_result_m = tf.eular2pose2(
                    forward_visual_result, 1)
                ground_truth_m = tf.eular2pose2(ground_truth, 1)
                vis.plot_two_path_with_gt(forward_visual_result_m,
                                          forward_visual_result_m,
                                          ground_truth_m, 10,
                                          'testing set forward')
                vis.plot_epoch_training_validing(
                    epoch,
                    epoch_loss_visu_mean.detach().cpu().numpy(),
                    epoch_loss_eval_mean.detach().cpu().numpy())
                if rpe_flag:
                    rot_eval, tra_eval = evaluate.evaluate(
                        ground_truth_m, forward_visual_result_m)
                    testing_ate_data.write(
                        str(np.mean(tra_eval)) + ' ' + str(np.mean(rot_eval)) +
                        '\n')
                    testing_ate_data.flush()
                    vis.plot_epoch_training_validing_2(epoch,
                                                       np.mean(tra_train),
                                                       np.mean(tra_eval), 22)
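
tf here is the project's own transform module (not TensorFlow), and
eular2pose/eular2pose2 are not included in the snippet. A rough sketch of the
pose-chaining idea they implement, composing per-frame Euler-angle/translation
increments into absolute 4x4 poses, might look like this; the X-Y-Z rotation
order and the [rx, ry, rz, tx, ty, tz] layout are assumptions that may differ
from the author's conventions.

import numpy as np

def euler_to_matrix(rx, ry, rz):
    # per-axis rotation matrices, composed in an assumed X-Y-Z order
    cx, sx = np.cos(rx), np.sin(rx)
    cy, sy = np.cos(ry), np.sin(ry)
    cz, sz = np.cos(rz), np.sin(rz)
    Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    return Rz @ Ry @ Rx

def chain_relative_motions(motions):
    # motions: (N, 6) array of per-frame [rx, ry, rz, tx, ty, tz] increments;
    # returns (N + 1, 4, 4) absolute poses obtained by chaining them
    pose = np.eye(4)
    poses = [pose.copy()]
    for rx, ry, rz, tx, ty, tz in motions:
        step = np.eye(4)
        step[:3, :3] = euler_to_matrix(rx, ry, rz)
        step[:3, 3] = [tx, ty, tz]
        pose = pose @ step
        poses.append(pose.copy())
    return np.stack(poses)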
Example #22
def main():  # reconstructed header; the original snippet begins mid-function
    parser = argparse.ArgumentParser(description='FSRCNN Demo')
    parser.add_argument('--opt', required=True)
    parser.add_argument('--name', required=True)
    parser.add_argument('--scale', default=3, type=int)
    parser.add_argument('--ps', default=48, type=int, help='patch_size')
    parser.add_argument('--bs', default=16, type=int, help='batch_size')
    parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
    parser.add_argument('--gpu_ids', default=None)
    parser.add_argument('--resume', action='store_true', default=False)
    parser.add_argument('--resume_path', default=None)
    parser.add_argument('--qat', action='store_true', default=False)
    parser.add_argument('--qat_path', default=None)

    args = parser.parse_args()
    args, lg = parse(args)

    # Tensorboard save directory
    resume = args['solver']['resume']
    tensorboard_path = 'Tensorboard/{}'.format(args['name'])

    if not resume:
        if osp.exists(tensorboard_path):
            shutil.rmtree(tensorboard_path, True)
            lg.info('Remove dir: [{}]'.format(tensorboard_path))
    writer = SummaryWriter(tensorboard_path)

    # create dataset
    train_data = DIV2K(args['datasets']['train'])
    lg.info('Create train dataset successfully!')
    lg.info('Train dataset size: [{}] images'.format(len(train_data)))
Example #23
        return l1, l2

    def save_network(self):
        print("saving network parameters")
        folder_path = os.path.join(self.opts.output_path, 'model')
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        torch.save(
            self.network.state_dict(),
            os.path.join(folder_path, "model_dict_{}".format(self.model_num)))
        self.model_num += 1
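        # wrap around so that at most five checkpoints (model_dict_0..model_dict_4) are kept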
        if self.model_num >= 5: self.model_num = 0


if __name__ == '__main__':
    opts = options.parse()
    # net = UNet(opts).cuda()
    # Change loading module for single test
    net = Sino_repair_net(opts, [0, 1], load_model=True)
    net.cuda()
    # print(net)

    import matplotlib.pylab as plt
    import numpy as np

    def plot_img(img):
        # Pass in the index to read one of the sinograms
        fig, ax = plt.subplots(1, 1, figsize=(10, 10))
        ax.set_title("Original (Sinogram)")
        ax.set_xlabel("Projection position (pixels)")
        ax.set_ylabel("Projection angle (deg)")
Example #24
from __future__ import print_function
import csv
import os
import numpy as np
import tensorflow as tf
import options
import importlib

opt = options.parse()

opt.data = 'stage1'  #Defaulting to stage1 data
dl = (importlib.import_module("dataloader." + opt.data)).get_data_loader(opt)

# Dir to save the log
log_dir = "oldLogs/"
model_dir = "cnn10-1/"
if not os.path.exists(log_dir + model_dir):
    os.makedirs(log_dir + model_dir)


def expand_last_dim(*input_data):
    res = []
    for in_data in input_data:
        res.append(np.expand_dims(in_data, axis=len(in_data.shape)))
    if len(res) == 1:
        return res[0]
    else:
        return res
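
# e.g. expand_last_dim(np.zeros((4, 3))).shape -> (4, 3, 1)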


def conv_bn_relu(input, kernel_shape, stride, bias_shape, is_training):
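    # The body is truncated in the source; a typical TF1-style block with this
    # signature (an assumption, not the author's code) would be:
    with tf.variable_scope(None, default_name='conv_bn_relu'):
        weights = tf.get_variable('weights', kernel_shape,
                                  initializer=tf.glorot_uniform_initializer())
        biases = tf.get_variable('biases', bias_shape,
                                 initializer=tf.zeros_initializer())
        conv = tf.nn.conv2d(input, weights,
                            strides=[1, stride, stride, 1], padding='SAME')
        bn = tf.layers.batch_normalization(conv + biases, training=is_training)
        return tf.nn.relu(bn)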
Example #25
def main(args):
    curses.wrapper(CLI(options.parse(args)).run)
Example #26
File: test.py  Project: marouenbg/dan
import argparse
import os
import sys

import numpy as np
import torch
from IPython import embed

import options as option
from models import create_model

sys.path.insert(0, "../../")
import utils as util
from data import create_dataloader, create_dataset
from data.util import bgr2ycbcr

#### options
parser = argparse.ArgumentParser()
parser.add_argument("-opt", type=str, required=True, help="Path to options YMAL file.")
opt = option.parse(parser.parse_args().opt, is_train=False)

opt = option.dict_to_nonedict(opt)

#### mkdir and logger
util.mkdirs(
    (
        path
        for key, path in opt["path"].items()
        if not key == "experiments_root"
        and "pretrain_model" not in key
        and "resume" not in key
    )
)

os.system("rm ./result")
Example #27

@app.route('/')
def index():
    return flask.templating.render_template('index.html',
                                            transports=transports)


@socketio.on('start')
def start(data):
    life = Life.from_file(opts.input_file) \
            if opts.input_file \
            else Life.random(int(data['height']), int(data['width']))
    flask_socketio.emit('generation', life.matrix.tolist())


@socketio.on('next')
def next(grid):
    life = Life(grid)
    life = life.next(opts.style)
    flask_socketio.emit('generation', life.matrix.tolist())


if __name__ == "__main__":
    socketio.run(app)
else:
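    # Executed when the module is imported (e.g. by a WSGI server): read the
    # options from the argv tail after a literal '--' separator if one is
    # present, a common convention for passing args through a host binary.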
    args = sys.argv[sys.argv.index('--') + 1:] \
           if '--' in sys.argv \
           else sys.argv[1:]
    opts = options.parse(args)
Example #28
def main():
    #### options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, help='Path to option YAML file.')
    parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    opt = option.parse(args.opt, is_train=True)

    #### distributed training settings
    if args.launcher == 'none':  # disabled distributed training
        opt['dist'] = False
        rank = -1
        print('Disabled distributed training.')
    else:
        opt['dist'] = True
        init_dist()
        world_size = torch.distributed.get_world_size()
        rank = torch.distributed.get_rank()

    #### loading resume state if exists
    if opt['path'].get('resume_state', None):
        # distributed resuming: all load into default GPU
        device_id = torch.cuda.current_device()
        resume_state = torch.load(opt['path']['resume_state'],
                                  map_location=lambda storage, loc: storage.cuda(device_id))
        option.check_resume(opt, resume_state['iter'])  # check resume options
    else:
        resume_state = None

    #### mkdir and loggers
    if rank <= 0:  # normal training (rank -1) OR distributed training (rank 0)
        if resume_state is None:
            print(opt['path'])
            util.mkdir_and_rename(
                opt['path']['experiments_root'])  # rename experiment folder if exists
            util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root'
                         and 'pretrain_model' not in key and 'resume' not in key and path is not None))

        # config loggers. Before it, the log will not work
        util.setup_logger('base', opt['path']['log'], 'train_' + opt['name'], level=logging.INFO,
                          screen=True, tofile=True)
        util.setup_logger('val', opt['path']['log'], 'val_' + opt['name'], level=logging.INFO,
                          screen=True, tofile=True)
        logger = logging.getLogger('base')
        logger.info(option.dict2str(opt))
        # tensorboard logger
        if opt['use_tb_logger'] and 'debug' not in opt['name']:
            version = float(torch.__version__[0:3])
            if version >= 1.1:  # PyTorch 1.1
                from torch.utils.tensorboard import SummaryWriter
            else:
                logger.info(
                    'You are using PyTorch {}. Tensorboard will use [tensorboardX]'.format(version))
                from tensorboardX import SummaryWriter
            trial = 0
            while os.path.isdir('../Loggers/' + opt['name'] + '/' + str(trial)):
                trial += 1
            tb_logger = SummaryWriter(log_dir='../Loggers/' + opt['name'] + '/' + str(trial))
    else:
        util.setup_logger('base', opt['path']['log'], 'train', level=logging.INFO, screen=True)
        logger = logging.getLogger('base')

    # convert to NoneDict, which returns None for missing keys
    opt = option.dict_to_nonedict(opt)

    # -------------------------------------------- ADDED --------------------------------------------
    l1_loss = torch.nn.L1Loss()
    mse_loss = torch.nn.MSELoss()
    calc_lpips = PerceptualLossLPIPS()
    if torch.cuda.is_available():
        l1_loss = l1_loss.cuda()
        mse_loss = mse_loss.cuda()
    # -----------------------------------------------------------------------------------------------

    #### random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    if rank <= 0:
        logger.info('Random seed: {}'.format(seed))
    util.set_random_seed(seed)

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    #### create train and val dataloader
    dataset_ratio = 200  # enlarge the size of each epoch
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_size = int(math.ceil(len(train_set) / dataset_opt['batch_size']))
            total_iters = int(opt['train']['niter'])
            total_epochs = int(math.ceil(total_iters / train_size))
            if opt['dist']:
                train_sampler = DistIterSampler(train_set, world_size, rank, dataset_ratio)
                total_epochs = int(math.ceil(total_iters / (train_size * dataset_ratio)))
            else:
                train_sampler = None
            train_loader = create_dataloader(train_set, dataset_opt, opt, train_sampler)
            if rank <= 0:
                logger.info('Number of train images: {:,d}, iters: {:,d}'.format(
                    len(train_set), train_size))
                logger.info('Total epochs needed: {:d} for iters {:,d}'.format(
                    total_epochs, total_iters))
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt, opt, None)
            if rank <= 0:
                logger.info('Number of val images in [{:s}]: {:d}'.format(
                    dataset_opt['name'], len(val_set)))
        else:
            raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))
    assert train_loader is not None

    #### create model
    model = Model(opt)

    #### resume training
    if resume_state:
        logger.info('Resuming training from epoch: {}, iter: {}.'.format(
            resume_state['epoch'], resume_state['iter']))

        start_epoch = resume_state['epoch']
        current_step = resume_state['iter']
        model.resume_training(resume_state)  # handle optimizers and schedulers
    else:
        current_step = 0
        start_epoch = 0

    #### training
    logger.info('Start training from epoch: {:d}, iter: {:d}'.format(start_epoch, current_step))
    for epoch in range(start_epoch, total_epochs + 1):
        if opt['dist']:
            train_sampler.set_epoch(epoch)
        train_bar = tqdm(train_loader, desc='[%d/%d]' % (epoch, total_epochs))
        for bus, train_data in enumerate(train_bar):

            # validation
            if epoch % opt['train']['val_freq'] == 0 and bus == 0 and rank <= 0:
                avg_ssim = avg_psnr = avg_lpips = val_pix_err_f = val_pix_err_nf = val_mean_color_err = 0.0
                print("into validation!")
                idx = 0
                val_bar = tqdm(val_loader, desc='[%d/%d]' % (epoch, total_epochs))
                for val_data in val_bar:
                    idx += 1
                    img_name = os.path.splitext(os.path.basename(val_data['LQ_path'][0]))[0]
                    img_dir = os.path.join(opt['path']['val_images'], img_name)
                    util.mkdir(img_dir)

                    model.feed_data(val_data)
                    model.test()

                    visuals = model.get_current_visuals()
                    sr_img = util.tensor2img(visuals['SR'])  # uint8
                    gt_img = util.tensor2img(visuals['GT'])  # uint8
                    lq_img = util.tensor2img(visuals['LQ'])  # uint8
                    #nr_img = util.tensor2img(visuals['NR'])  # uint8
                    #nf_img = util.tensor2img(visuals['NF'])  # uint8
                    #nh_img = util.tensor2img(visuals['NH'])  # uint8


                    #print("Great! images got into here.")

                    # Save SR images for reference
                    save_sr_img_path = os.path.join(img_dir,
                                                 '{:s}_{:d}_sr.png'.format(img_name, current_step))
                    save_nr_img_path = os.path.join(img_dir,
                                                 '{:s}_{:d}_lq.png'.format(img_name, current_step))
                    #save_nf_img_path = os.path.join(img_dir,
                                                # 'bs_{:s}_{:d}_nr.png'.format(img_name, current_step)) 
                    #save_nh_img_path = os.path.join(img_dir,
                                                # 'bs_{:s}_{:d}_nh.png'.format(img_name, current_step)) 
                    util.save_img(sr_img, save_sr_img_path)
                    util.save_img(lq_img, save_nr_img_path)
                    #util.save_img(nf_img, save_nf_img_path)
                    #util.save_img(nh_img, save_nh_img_path)


                    #print("Saved")
                    # calculate PSNR
                    gt_img = gt_img / 255.
                    sr_img = sr_img / 255.
                    #nf_img = nf_img / 255.
                    lq_img = lq_img / 255.
                    #cropped_lq_img = lq_img[crop_size:-crop_size, crop_size:-crop_size, :]
                    #cropped_nr_img = nr_img[crop_size:-crop_size, crop_size:-crop_size, :]
                    avg_psnr += util.calculate_psnr(sr_img * 255, gt_img * 255)
                    avg_ssim += util.calculate_ssim(sr_img * 255, gt_img * 255)
                    avg_lpips += calc_lpips(visuals['SR'], visuals['GT'])
                    #avg_psnr_n += util.calculate_psnr(cropped_lq_img * 255, cropped_nr_img * 255)

                    # ----------------------------------------- ADDED -----------------------------------------
                    val_pix_err_nf += l1_loss(visuals['SR'], visuals['GT'])
                    val_mean_color_err += mse_loss(visuals['SR'].mean(2).mean(1), visuals['GT'].mean(2).mean(1))
                    # -----------------------------------------------------------------------------------------
                
                
                avg_psnr = avg_psnr / idx
                avg_ssim = avg_ssim / idx
                avg_lpips = avg_lpips / idx
                val_pix_err_f /= idx
                val_pix_err_nf /= idx
                val_mean_color_err /= idx



                # log
                logger.info('# Validation # PSNR: {:.4e},'.format(avg_psnr))
                logger.info('# Validation # SSIM: {:.4e},'.format(avg_ssim))
                logger.info('# Validation # LPIPS: {:.4e},'.format(avg_lpips))
                logger_val = logging.getLogger('val')  # validation logger
                logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.4e} ssim: {:.4e} lpips: {:.4e}'.format(
                    epoch, current_step, avg_psnr, avg_ssim, avg_lpips))
                # tensorboard logger
                if opt['use_tb_logger'] and 'debug' not in opt['name']:
                    tb_logger.add_scalar('val_psnr', avg_psnr, current_step)
                    tb_logger.add_scalar('val_ssim', avg_ssim, current_step)
                    tb_logger.add_scalar('val_lpips', avg_lpips, current_step)
                    tb_logger.add_scalar('val_pix_err_nf', val_pix_err_nf, current_step)
                    tb_logger.add_scalar('val_mean_color_err', val_mean_color_err, current_step)

            current_step += 1
            if current_step > total_iters:
                break
            #### update learning rate
            model.update_learning_rate(current_step, warmup_iter=opt['train']['warmup_iter'])

            #### training
            model.feed_data(train_data)
            model.optimize_parameters(current_step)
            model.clear_data()
            #### tb_logger
            if current_step % opt['logger']['tb_freq'] == 0:
                logs = model.get_current_log()
                if opt['use_tb_logger'] and 'debug' not in opt['name']:
                    for k, v in logs.items():
                        if rank <= 0:
                            tb_logger.add_scalar(k, v, current_step)

            
            #### logger
            if epoch % opt['logger']['print_freq'] == 0  and epoch != 0 and bus == 0:
                logs = model.get_current_log()
                message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(
                    epoch, current_step, model.get_current_learning_rate())
                for k, v in logs.items():
                    message += '{:s}: {:.4e} '.format(k, v)
                if rank <= 0:
                    logger.info(message)

           
            #### save models and training states
            if epoch % opt['logger']['save_checkpoint_freq'] == 0 and epoch != 0 and bus == 0:
                if rank <= 0:
                    logger.info('Saving models and training states.')
                    model.save(current_step)
                    model.save_training_state(epoch, current_step)

    if rank <= 0:
        logger.info('Saving the final model.')
        model.save('latest')
        logger.info('End of training.')
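
PerceptualLossLPIPS is imported from elsewhere in this project. A minimal
wrapper over the public lpips package that matches how it is called above
(calc_lpips(sr_tensor, gt_tensor) returning a scalar) could look like the
sketch below; the class name mirrors the call site, and the internals are
assumptions.

import lpips
import torch

class PerceptualLossLPIPS:
    def __init__(self, net='alex'):
        # lpips.LPIPS expects inputs roughly in the [-1, 1] range
        self.metric = lpips.LPIPS(net=net)
        if torch.cuda.is_available():
            self.metric = self.metric.cuda()

    def __call__(self, sr, gt):
        # accept (C, H, W) or (B, C, H, W) tensors assumed to be in [0, 1]
        if sr.dim() == 3:
            sr, gt = sr.unsqueeze(0), gt.unsqueeze(0)
        device = next(self.metric.parameters()).device
        sr = sr.to(device) * 2 - 1  # rescale [0, 1] -> [-1, 1]
        gt = gt.to(device) * 2 - 1
        with torch.no_grad():
            return self.metric(sr, gt).mean().item()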