Example 1
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file", default="", help="path to config file", type=str
                        )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weights repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))


    test_dataloader, num_query, _ = get_test_dataloader(cfg, test_phase=False)

    distmat_paths = [cfg.TEST.DISTMAT1, cfg.TEST.DISTMAT2, cfg.TEST.DISTMAT3,
                     cfg.TEST.DISTMAT4, cfg.TEST.DISTMAT5, cfg.TEST.DISTMAT6]
    # load dist_mats
    dist_mats = []

    cnt = 0
    thresh = 3
    for distmat_path in distmat_paths:
        if os.path.isfile(distmat_path):
            f = h5py.File(distmat_path, 'r')
            #mat = f['dist_mat'][()]

            # NOTE: both branches currently read the same 'dist_mat1' dataset
            if cnt < thresh:
                mat = f['dist_mat1'][()]
            else:
                mat = f['dist_mat1'][()]

            mat = mat[np.newaxis, ...]
            dist_mats.append(mat)
            f.close()
            cnt += 1

    logger.info(f'Average {cnt} results')
    dist_mat = np.concatenate(dist_mats, axis=0).mean(axis=0)

    inference_with_distmat(cfg, test_dataloader, num_query, dist_mat)
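The averaging step above (read every stored distance matrix and take their element-wise mean) also works as a small standalone helper. A minimal sketch, assuming each matrix was saved under a single HDF5 key; the paths and the 'dist_mat' key name are placeholders, not taken from a specific config:

import os
import h5py
import numpy as np

def average_distmats(paths, key='dist_mat'):
    """Load every existing HDF5 distance matrix and return their element-wise mean."""
    mats = []
    for path in paths:
        if not os.path.isfile(path):
            continue
        with h5py.File(path, 'r') as f:
            mats.append(f[key][()][np.newaxis, ...])
    if not mats:
        raise ValueError('no distance matrices found')
    return np.concatenate(mats, axis=0).mean(axis=0)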
Example 2
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weights repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.DATASETS.PRELOAD_IMAGE = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    model = build_model(cfg, 0)
    #print('model', model)
    model = model.cuda()
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))

    test_dataloader, num_query, _ = get_test_dataloader(cfg, test_phase=False)

    #inference_no_rerank(cfg, model, test_dataloader, num_query)
    #inference(cfg, model, test_dataloader, num_query)
    #inference_aligned(cfg, model, test_dataloader, num_query) # using flipped image

    inference_aligned_flipped(cfg,
                              model,
                              test_dataloader,
                              num_query,
                              use_local_feature=False,
                              use_rerank=True,
                              use_cross_feature=True)
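inference_aligned_flipped, as its name (and the commented-out line above) suggests, also evaluates horizontally flipped images. A minimal sketch of the usual flip-and-average trick with a hypothetical feature extractor; this illustrates the general technique, not the repository's actual implementation:

import torch

def extract_flipped_features(model, images):
    """Average the features of the original and the horizontally flipped batch."""
    with torch.no_grad():
        feat = model(images)
        feat_flipped = model(torch.flip(images, dims=[3]))  # flip along the width axis
    return (feat + feat_flipped) / 2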
Example 3
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    gpus = os.environ[
        "CUDA_VISIBLE_DEVICES"] if "CUDA_VISIBLE_DEVICES" in os.environ else '0'
    gpus = [int(i) for i in gpus.split(',')]
    num_gpus = len(gpus)

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weights repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    model = build_model(cfg, 0)
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.cuda()

    print('prepare test set ...')
    test_dataloader_collection, num_query_collection, test_items_collection = get_test_dataloader(
        cfg)

    inference(cfg,
              model,
              test_dataloader_collection,
              num_query_collection,
              is_vis=True,
              test_collection=test_items_collection)
Example 4
    def __init__(self, cfg):
        self.cfg = cfg
        self.mean = torch.tensor([0.485 * 255, 0.456 * 255,
                                  0.406 * 255]).view(1, 3, 1, 1)
        self.std = torch.tensor([0.229 * 255, 0.224 * 255,
                                 0.225 * 255]).view(1, 3, 1, 1)

        self.model = build_model(cfg, 0)
        self.tng_dataloader, self.val_dataloader, self.num_query = get_test_dataloader(
            cfg)
        self.model = self.model.cuda()
        self.model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))

        print('extract person features ...')
        self.get_distmat()
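The mean and std tensors above are the ImageNet statistics scaled to the 0-255 range and reshaped to (1, 3, 1, 1) so they broadcast over a whole image batch. A small illustration with a random batch (the shapes are made up):

import torch

mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).view(1, 3, 1, 1)
std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).view(1, 3, 1, 1)

images = torch.randint(0, 256, (4, 3, 256, 128)).float()  # fake batch of 0-255 images
normalized = (images - mean) / std  # broadcasts over batch and spatial dimensions
print(normalized.shape)  # torch.Size([4, 3, 256, 128])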
Example 5
def main(args):
    in_feats = 1
    n_hidden = 256
    n_classes = 8
    n_layers = 1
    activation = F.relu
    dropout = 0.2
    weight_path = './weight/classifier.pth'
    num_epochs = args.n_epochs
    mode = args.mode

    # prepare dataset
    train_dataset = Customized_MiniGCDataset(2000, 10, 30)
    train_data_dict = get_train_dataloader(train_dataset, 32)
    train_dataloaders = train_data_dict['dataloaders']
    train_dataset_sizes = train_data_dict['dataset_sizes']

    test_dataset = Customized_MiniGCDataset(500, 10, 30)
    test_dataloader, test_dataset_size = get_test_dataloader(test_dataset, 32)

    # GraphClassifier
    model = GraphClassifier(in_feats, n_hidden, n_classes, n_layers,
                            activation, dropout)
    loss_func = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # train mode
    if mode == 'train':
        model = model.to(device)
        model = train(model, loss_func, optimizer, num_epochs,
                      train_dataloaders, train_dataset_sizes, device)
        torch.save(model.state_dict(), weight_path)

    # test mode
    else:
        model = model.to(device)
        model.load_state_dict(torch.load(weight_path))
        model.eval()
        acc = evaluate(model, test_dataloader, test_dataset_size, device)
Example 6
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument('--test_phase', action='store_true', help="run in test phase and write submission results")
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weights repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    test_dataloader, num_query, dataset = get_test_dataloader(cfg, test_phase=True)


    distmat_paths = [cfg.TEST.DISTMAT1, cfg.TEST.DISTMAT2, cfg.TEST.DISTMAT3,
                     cfg.TEST.DISTMAT4, cfg.TEST.DISTMAT5, cfg.TEST.DISTMAT6]
    # load dist_mats
    dist_mats = []

    cnt = 0
    for distmat_path in distmat_paths:
        if os.path.isfile(distmat_path):
            f = h5py.File(distmat_path, 'r')
            mat = f['dist_mat'][()]
            mat = mat[np.newaxis, ...]
            dist_mats.append(mat)
            f.close()
            cnt += 1

    dist_mat = np.concatenate(dist_mats, axis=0).mean(axis=0)

    ## add rerank
    query_num = 3147
    logger.info('Reranking ...')
    print('dist_mat', dist_mat.shape)
    dist_mat = re_ranking_final(dist_mat, query_num, k1=6, k2=2, lambda_value=0.3) # (current best)
    print('dist_mat 2', dist_mat.shape)
    ##

    score = dist_mat
    index = np.argsort(score, axis=1)  # from small to large

    logger.info(f'Average {cnt} results')
    # saving results
    if args.test_phase:
        query_path = [t[0] for t in dataset.query]
        gallery_path = [t[0] for t in dataset.gallery]
        logger.info("-------------Write resutls to json file----------")

        results = {}
        top_k = 200
        for i in range(len(query_path)):
            topk_res = []
            for j in range(top_k):
                img_path = gallery_path[index[i, j]]
                # print(img_path)
                topk_res.append(img_path.split('/')[-1].split('_')[-1])
            results[query_path[i].split('/')[-1].split('_')[-1]] = topk_res

        # write results
        strtime = time.strftime("%Y%m%d_%H%M%S", time.localtime())
        json.dump(results, open('submit/ensemble_rerank_%s.json' % (strtime), 'w'))
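np.argsort(score, axis=1) turns each row of the (re-ranked) distance matrix into a ranking of gallery indices from the smallest to the largest distance, which is what the top-k loop above relies on. A toy illustration with a made-up 2x4 matrix:

import numpy as np

dist_mat = np.array([[0.3, 0.1, 0.9, 0.5],
                     [0.7, 0.2, 0.4, 0.6]])
index = np.argsort(dist_mat, axis=1)
print(index)
# [[1 0 3 2]
#  [1 2 3 0]]  -> column j holds the gallery index ranked j-th for each query row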
Example 7
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument('--test_phase', action='store_true', help="run in test phase and write submission results")
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weights repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    test_dataloader, num_query, dataset = get_test_dataloader(cfg,
                                                              test_phase=True)

    original_filenames = []
    for img_path, _, _ in dataset.query:
        original_filenames.append(img_path.split('/')[-1].split('_')[1])
    query_idx = argsort(original_filenames)

    print('fixed query order', [dataset.query[i][0] for i in query_idx[:10]])

    original_filenames = []
    for img_path, _, _ in dataset.gallery:
        original_filenames.append(img_path.split('/')[-1].split('_')[1])
    gallery_idx = argsort(original_filenames)
    print('fixed gallery order',
          [dataset.gallery[i][0] for i in gallery_idx[:10]])

    distmat_paths = [
        cfg.TEST.DISTMAT1,
        cfg.TEST.DISTMAT2,
        cfg.TEST.DISTMAT3,
        cfg.TEST.DISTMAT4,
        cfg.TEST.DISTMAT5,
        cfg.TEST.DISTMAT6,
        cfg.TEST.DISTMAT7,
        cfg.TEST.DISTMAT8,
        cfg.TEST.DISTMAT9,
        cfg.TEST.DISTMAT10,
        cfg.TEST.DISTMAT11,
        cfg.TEST.DISTMAT12,
        cfg.TEST.DISTMAT13,
        cfg.TEST.DISTMAT14,
        cfg.TEST.DISTMAT15,
        cfg.TEST.DISTMAT16,
        cfg.TEST.DISTMAT17,
        cfg.TEST.DISTMAT18,
    ]

    cnt = 0
    for distmat_path in distmat_paths:
        if os.path.isfile(distmat_path):
            f = h5py.File(distmat_path, 'r')
            mat = f['dist_mat'][()]

            if not distmat_path.endswith('baseline_v5.1_distmat.h5'):
                mat = mat[query_idx]
                mat = mat[:, gallery_idx]

                f2 = h5py.File('%s_sorted.h5' % distmat_path, 'w')
                f2.create_dataset('dist_mat', data=mat, compression='gzip')
                f2.close()

                cnt += 1

            #mat = mat[np.newaxis, ...]
            #dist_mats.append(mat)
            f.close()

        else:
            logger.info(f'Invalid distmat path {distmat_path}')
    logger.info(f'Sort {cnt} results')
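The re-ordering above (mat[query_idx] followed by mat[:, gallery_idx]) realigns a distance matrix so its rows and columns follow a new query and gallery order. A self-contained example with a synthetic 3x3 matrix and made-up index orders:

import numpy as np

dist = np.arange(9).reshape(3, 3)
query_idx = np.array([2, 0, 1])    # hypothetical new row (query) order
gallery_idx = np.array([1, 2, 0])  # hypothetical new column (gallery) order

sorted_dist = dist[query_idx][:, gallery_idx]
print(sorted_dist)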
Example 8
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument('--test_phase', action='store_true', help="run in test phase and write submission results")
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weights repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.DATASETS.PRELOAD_IMAGE = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    model = build_model(cfg, 0)
    model = model.cuda()
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))

    test_dataloader, num_query, dataset = get_test_dataloader(cfg,
                                                              test_phase=True)

    use_local_feature = True
    use_rerank = True
    use_cross_feature = False

    distmat, index, distmat1, distmat2 = inference_aligned_flipped(
        cfg, model, test_dataloader, num_query, use_local_feature, use_rerank,
        use_cross_feature)

    suffix = 'flip'
    if use_local_feature:
        suffix += '_aligned'
    if use_rerank:
        suffix += '_rerank'
    if use_cross_feature:
        suffix += '_cross'

    # saving results
    if args.test_phase:
        query_path = [t[0] for t in dataset.query]
        gallery_path = [t[0] for t in dataset.gallery]
        logger.info("-------------Write resutls to json file----------")

        results = {}
        top_k = 200
        for i in range(len(query_path)):
            topk_res = []
            for j in range(top_k):
                img_path = gallery_path[index[i, j]]
                # print(img_path)
                topk_res.append(img_path.split('/')[-1].split('_')[-1])
            results[query_path[i].split('/')[-1].split('_')[-1]] = topk_res

        # write results
        if not os.path.isdir('submit'):
            os.mkdir('submit')

        strtime = time.strftime("%Y%m%d_%H%M%S", time.localtime())
        json.dump(
            results,
            open(
                'submit/reid_%s_%s_%s.json' %
                (cfg.MODEL.NAME, strtime, suffix), 'w'))

        # saving dist_mats
        mat_path = 'dist_mats'
        if not os.path.isdir(mat_path):
            os.mkdir(mat_path)
        f = h5py.File(
            '%s/test_%s_%s_%s.h5' %
            (mat_path, cfg.MODEL.NAME, strtime, suffix), 'w')
        f.create_dataset('dist_mat', data=distmat, compression='gzip')

        if distmat1 is not None:
            f.create_dataset('dist_mat1', data=distmat1, compression='gzip')
        if distmat2 is not None:
            f.create_dataset('dist_mat2', data=distmat2, compression='gzip')
        f.close()
Example 9
def train(args):
    # hyper parameter
    no_layer_D = 3
    no_D = 2
    lamda = args.lamda # 10
    alpha = args.alpha
    have_gt = 0
    gpu_id = 0
    no_epoch_eval = 300
    # define model
    if args.model_name == 'Normal':
        Generator = model.get_generator(False,ngf=32, n_downsample_global=3, n_blocks_global=args.resblock, gpu_ids=[gpu_id] )
        Discriminator = model.get_discriminator(input_nc = 6, ndf=64, n_layers_D = no_layer_D, gpu_ids=[gpu_id])
        
    elif args.model_name == 'Enhance':
        Generator = model.get_generator(True,ngf=32, n_downsample_global=3, n_blocks_global=args.resblock, gpu_ids=[gpu_id] )
        Discriminator = model.get_discriminator(input_nc = 6,ndf=64, n_layers_D = no_layer_D, gpu_ids=[gpu_id])
    else:
        raise Exception("The model name is wrong/ not supported yet: {}".format(args.model_name))

    no_params_G = no_of_parameters(Generator)
    no_params_D = no_of_parameters(Discriminator)
    save = SaveData(args)
    log = "Number of Generator parameter  {}".format(no_params_G)
    print(log)
    save.save_log(log)
    log = "Number of Discriminator parameter  {}".format(no_params_D)
    print(log)
    save.save_log(log)
    
    save.write_csv_header('mode', 'epoch', 'lr', 'sum_loss','loss_D','loss_G','time(min)', 'val_psnr', 'val_ssim')
    last_epoch = 0

    if args.multi:
        multi = 1
        print("Using", torch.cuda.device_count(), "GPUs!")
        Generator = nn.DataParallel(Generator)
        Discriminator = nn.DataParallel(Discriminator)

    cudnn.benchmark = True

    # resume model
    if args.finetuning:
        Generator, Discriminator,last_epoch = save.load_model(Generator,Discriminator)

    # dataloader
    dataloader = get_train_dataloader(args.trainset, args.batchSize)
    testdataloader = get_test_dataloader(args.testset, 1)
    start_epoch = last_epoch

    # load function
    lossGAN = model.GANLoss(use_lsgan=args.lsGan,tensor=torch.cuda.FloatTensor)
    lossFeat = nn.L1Loss()
    lossMse = nn.MSELoss()
    if True:
        lossVGG = model.VGGLoss([gpu_id])

    loss_names = ['G_GAN', 'G_GAN_Feat', 'G_VGG', 'D_real', 'D_fake', 'G_L2']

    # optimizer
    optim_G = optim.Adam(Generator.parameters(), lr=args.lr)
    optim_D = optim.Adam(Discriminator.parameters(), lr=args.lr)
    lr_cheduler = LrScheduler(args.lr, args.decayType, args.lrDecay)

    # log var
    avg_loss_D_real = AverageMeter()
    avg_loss_D_fake = AverageMeter()
    avg_loss_D = AverageMeter()
    avg_loss_G_gan = AverageMeter()
    avg_loss_G_vgg = AverageMeter()
    avg_loss_G_mse = AverageMeter()
    avg_loss_G = AverageMeter()
    avg_time = AverageMeter()
    avg_time.reset()



    print("Begin train from epoch: {}".format(start_epoch))
    print("Batch len: {}".format(len(dataloader.dataset)))

    for epoch in range(start_epoch, args.epochs):
        start = time.time()
        learning_rate = lr_cheduler.adjust_lr(epoch, optim_G)
        learning_rate = lr_cheduler.adjust_lr(epoch, optim_D)
        # learning_rate = args.lr
        avg_loss_D.reset()
        avg_loss_G.reset()
        for batch, (hazy_imgs, gt_imgs, names) in enumerate(dataloader):
            hazy_imgs = Variable(hazy_imgs.cuda())
            gt_imgs = Variable(gt_imgs.cuda())
            
            # Forward
            fake_imgs ,enhance_imgs = Generator(hazy_imgs)
            
            # Update discriminator
            set_requires_grad(Discriminator,True) # Enable update D 
            optim_D.zero_grad()

            # Fake images
            input_concat = torch.cat((hazy_imgs, fake_imgs.detach()), dim=1)
            pred_fake = Discriminator.forward(input_concat)
            loss_D_fake = lossGAN(pred_fake,False)

            # Real Detection
            input_concat = torch.cat((hazy_imgs, gt_imgs), dim=1)
            pred_real = Discriminator.forward(input_concat)
            loss_D_real = lossGAN(pred_real,True)
            
            total_loss_D = alpha*(loss_D_fake + loss_D_real)

            # backprop
            total_loss_D.backward()
            optim_D.step()

            # log loss
            avg_loss_D_real.update(loss_D_real.data.item() , args.batchSize)
            avg_loss_D_fake.update(loss_D_fake.data.item() , args.batchSize)
            avg_loss_D.update(total_loss_D.data.item() , args.batchSize)

            # Update generator
            set_requires_grad(Discriminator,False) # Disable update D 
            optim_G.zero_grad()

            # Loss Generator GAN
            pred_fake_G = Discriminator.forward(torch.cat((hazy_imgs, fake_imgs), dim=1))
            loss_G_GAN = lossGAN(pred_fake_G,True)

            # Feature matching loss
            loss_G_GAN_Feat = 0
            if False:
                pred_fake = Discriminator.forward(torch.cat((hazy_imgs, fake_imgs), dim=1))
                feat_weights = 4.0 / (no_layer_D + 1)
                D_weights = 1.0 / no_D
                for i in range(no_D):
                    for j in range(len(pred_fake[i]) - 1):
                        loss_G_GAN_Feat += D_weights * feat_weights * \
                                           lossFeat(pred_fake[i][j],
                                                              pred_real[i][j].detach()) * lamda

            # VGG feature matching loss
            loss_G_VGG = 0
            if True:
                loss_G_VGG = lossVGG(enhance_imgs, gt_imgs) * lamda
            loss_G_L2 = lossMse(enhance_imgs, gt_imgs)
            total_loss_G = alpha*loss_G_GAN + loss_G_GAN_Feat + loss_G_VGG + loss_G_L2

            # backprop
            total_loss_G.backward()
            optim_G.step()

            # update loss
            avg_loss_G_vgg.update(loss_G_VGG.data.item(), args.batchSize)
            avg_loss_G_gan.update(loss_G_GAN.data.item(), args.batchSize)
            avg_loss_G_mse.update(loss_G_L2.data.item(), args.batchSize)
            avg_loss_G.update(total_loss_G.data.item(), args.batchSize)


        end = time.time()
        epoch_time = (end - start)
        avg_time.update(epoch_time)
        log = "[{} / {}] \tLearning_rate: {:.5f} \tTotal_loss:{:.4f} \tAvg_loss_D: {:.4f} \tAvg_loss_G: {:.4f} \tTotal_time: {:.4f} min \tBatch_time: {:.4f} sec".format(
            epoch + 1, args.epochs, learning_rate, avg_loss_D.sum() + avg_loss_G.sum(), avg_loss_D.avg(),avg_loss_G.avg(), avg_time.sum() / 60, avg_time.avg())
        print(log)
        save.save_log(log)
        save.log_csv('train', epoch + 1, learning_rate, avg_loss_D.sum() + avg_loss_G.sum(), avg_loss_D.avg(),avg_loss_G.avg(), avg_time.sum() / 60)
        save.write_tf_board('sum_loss',avg_loss_D.sum() + avg_loss_G.sum(),epoch+1)

        save.write_tf_board('avg_loss_D_real', avg_loss_D_real.avg(), epoch + 1)
        save.write_tf_board('avg_loss_D_fake', avg_loss_D_fake.avg(), epoch + 1)
        save.write_tf_board('avg_loss_D',avg_loss_D.avg(),epoch+1)

        save.write_tf_board('avg_loss_G_GAN', avg_loss_G_gan.avg(), epoch + 1)
        save.write_tf_board('avg_loss_G_mse', avg_loss_G_mse.avg(), epoch + 1)
        save.write_tf_board('avg_loss_G_VGG', avg_loss_G_vgg.avg(), epoch + 1)
        save.write_tf_board('avg_loss_G',avg_loss_G.avg(),epoch+1)

        if (epoch + 1) % args.period == 0 and (epoch + 1) >=no_epoch_eval:
            Generator.eval()
            if have_gt:
                avg_psnr, avg_ssim = test(Generator, testdataloader,epoch+1,1)
            else:
                avg_niqe = test(Generator, testdataloader,epoch+1,0)
            Generator.train()
            if have_gt:
                log = "*** [{} / {}] \tVal PSNR: {:.4f} \tVal SSIM: {:.4f} ".format(epoch + 1, args.epochs, avg_psnr,
                                                                                avg_ssim)
                print(log)
                save.save_log(log)
                save.log_csv('test', epoch + 1, learning_rate, avg_loss_D.sum() + avg_loss_G.sum(), avg_loss_D.avg(),avg_loss_G.avg(), avg_time.sum() / 60, avg_psnr, avg_ssim)
                save.save_model(Generator,Discriminator, epoch+1, avg_psnr)
                save.write_tf_board('val_psnr', avg_psnr, epoch+1)
            else:
                log = "*** [{} / {}] \tVal NIQE: {:.4f}  ".format(epoch + 1, args.epochs, avg_niqe)
                print(log)
                save.save_log(log)
                save.log_csv('test', epoch + 1, learning_rate, avg_loss_D.sum() + avg_loss_G.sum(), avg_loss_D.avg(),
                             avg_loss_G.avg(), avg_time.sum() / 60, avg_niqe)
                save.save_model(Generator, Discriminator, epoch+1, 100 -avg_niqe)
                save.write_tf_board('val_niqe', avg_niqe, epoch + 1)
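set_requires_grad is used above to freeze the discriminator while the generator is updated, but its definition is not part of this snippet. A common implementation (an assumption in the style of pix2pixHD-like training loops, not necessarily this repository's) simply toggles requires_grad on every parameter:

def set_requires_grad(nets, requires_grad=False):
    """Enable or disable gradients for the given network or list of networks."""
    if not isinstance(nets, (list, tuple)):
        nets = [nets]
    for net in nets:
        if net is not None:
            for param in net.parameters():
                param.requires_grad = requires_grad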
Example 10
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument('--test_phase', action='store_true', help="run in test phase and write submission results")
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weights repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    test_dataloader, num_query, dataset = get_test_dataloader(cfg, test_phase=True)


    distmat_paths = [cfg.TEST.DISTMAT1, cfg.TEST.DISTMAT2, cfg.TEST.DISTMAT3,
                     cfg.TEST.DISTMAT4, cfg.TEST.DISTMAT5, cfg.TEST.DISTMAT6,
                     cfg.TEST.DISTMAT7, cfg.TEST.DISTMAT8, cfg.TEST.DISTMAT9,
                     cfg.TEST.DISTMAT10, cfg.TEST.DISTMAT11, cfg.TEST.DISTMAT12,
                     cfg.TEST.DISTMAT13, cfg.TEST.DISTMAT14, cfg.TEST.DISTMAT15,
                     cfg.TEST.DISTMAT16, cfg.TEST.DISTMAT17, cfg.TEST.DISTMAT18,

                     ]
    # load dist_mats
    dist_mats = []
    #weights= np.asarray([0.783, 0.776, 0.76])
    #weights = weights / weights.sum()

    cnt = 0
    for distmat_path in distmat_paths:
        if os.path.isfile(distmat_path):
            f = h5py.File(distmat_path, 'r')
            mat = f['dist_mat'][()]
            mat = mat[np.newaxis, ...]
            dist_mats.append(mat)
            f.close()
            cnt += 1
        else:
            logger.info(f'Invalid distmat path {distmat_path}')

    logger.info(f'Average {cnt} results')
    dist_mat = np.concatenate(dist_mats, axis=0).mean(axis=0)

    # whether to apply the query-uniqueness assumption (gains about +0.15%)
    use_query_constraint = True
    if use_query_constraint:
        logger.info('Using query unique constraint ...')
        dist_mat = query_constraint_dimstmat(dist_mat, k1=14, gamma=0.2)

    index = np.argsort(dist_mat, axis=1)  # from small to large

    # saving results
    if args.test_phase:
        query_path = [t[0] for t in dataset.query]
        gallery_path = [t[0] for t in dataset.gallery]
        logger.info("-------------Write resutls to json file----------")

        results = {}
        top_k = 200
        for i in range(len(query_path)):
            topk_res = []
            for j in range(top_k):
                img_path = gallery_path[index[i, j]]
                # print(img_path)
                topk_res.append(img_path.split('/')[-1].split('_')[-1])
            results[query_path[i].split('/')[-1].split('_')[-1]] = topk_res

        # write results
        strtime = time.strftime("%Y%m%d_%H%M%S", time.localtime())
        json.dump(results, open('submit/ensemble_%s_%dm.json' % (strtime, cnt), 'w'))

        # saving dist_mats
        mat_path = 'dist_mats'
        if not os.path.isdir(mat_path):
            os.mkdir(mat_path)
        mat_path = '%s/ensemble_%s_%dm.h5' % (mat_path, strtime, cnt)
        f = h5py.File(mat_path, 'w')
        f.create_dataset('dist_mat', data=dist_mat, compression='gzip')
        f.close()
Example 11
def test(args):
    # hyper params
    no_layer_D = 3
    no_D = 2
    lamda = 10
    # define model
    if args.model_name == 'Normal':
        Generator = model.get_generator(False,
                                        ngf=32,
                                        n_downsample_global=3,
                                        n_blocks_global=args.resblock,
                                        gpu_ids=[0])
        #Discriminator = model.get_discriminator(input_nc=6, ndf=64, n_layers_D=no_layer_D, gpu_ids=[args.gpu])

    elif args.model_name == 'Enhance':
        Generator = model.get_generator(True,
                                        ngf=32,
                                        n_downsample_global=3,
                                        n_blocks_global=args.resblock,
                                        gpu_ids=[0])
        #Discriminator = model.get_discriminator(input_nc=6, ndf=64, n_layers_D=no_layer_D, gpu_ids=[args.gpu])
    else:
        raise Exception(
            "The model name is wrong/ not supported yet: {}".format(
                args.model_name))

    Generator.load_state_dict(torch.load(args.pretrained_model))

    testdataloader = get_test_dataloader(args.dataset, 1)
    Generator.eval()

    avg_psnr = 0
    avg_ssim = 0
    avg_niqe = 0
    avg_time = 0
    count = 0
    crop = False
    have_gt = False  # True : use PSNR, SSIM instead of NIQE

    if crop:
        center_crop = 256
    # make val folder
    if not os.path.isdir("val/%s/%s" % (args.model_name, args.dataset)):
        os.makedirs("val/%s/%s" % (args.model_name, args.dataset),
                    exist_ok=False)

    for batch, (im_hazy, im_gt, im_name) in enumerate(testdataloader):
        count = count + 1
        #import pdb; pdb.set_trace()

        with torch.no_grad():
            im_hazy = Variable(im_hazy.cuda(), volatile=False)
            if have_gt:
                im_gt = Variable(im_gt.cuda())
            W = im_hazy.size()[2]
            H = im_hazy.size()[3]
            if crop:
                Ws = W // 2
                Hs = H // 2

                im_hazy = im_hazy[:, :,
                                  (Ws - center_crop // 2):(Ws +
                                                           center_crop // 2),
                                  (Hs - center_crop // 2):(Hs +
                                                           center_crop // 2)]
                if have_gt:
                    im_gt = im_gt[:, :,
                                  (Ws - center_crop // 2):(Ws +
                                                           center_crop // 2),
                                  (Hs - center_crop // 2):(Hs +
                                                           center_crop // 2)]
            else:
                if W % 32:
                    Wr = W % 32
                    im_hazy = im_hazy[:, :, :W - Wr, :]
                    if have_gt:
                        im_gt = im_gt[:, :, :W - Wr, :]
                if H % 32:
                    Hr = H % 32
                    im_hazy = im_hazy[:, :, :, :(H - Hr)]
                    if have_gt:
                        im_gt = im_gt[:, :, :, :(H - Hr)]

            begin_time = time.time()
            output, enhance = Generator(im_hazy)
            #enhance = output
            end_time = time.time()
            avg_time += (end_time - begin_time)

        enhance = unnormalize(enhance[0])

        out = Image.fromarray(np.uint8(enhance), mode='RGB')  # output of the generator
        name = im_name[0][0:-4] + '.png'
        out.save('val/%s/%s/%s' % (args.model_name, args.dataset, name))

        # =========== Target Image ===============
        if have_gt:
            im_gt = unnormalize(im_gt[0])
            # crop to size output
            im_gt = im_gt[:enhance.shape[0], :enhance.shape[1], :]
            psnr, ssim = psnr_ssim_from_sci(enhance, im_gt)
            print('%d : %s PSNR/SSIM: %.4f/%.4f ' % (count, name, psnr, ssim))

            avg_ssim += ssim
            avg_psnr += psnr
        else:
            im_niqe = niqe_from_skvideo(enhance)
            print('%d : %s NIQE: %.4f ' % (count, name, im_niqe))
            avg_niqe += im_niqe
    if have_gt:
        print('AVG PSNR/AVG SSIM : %.4f/%.4f ' %
              (avg_psnr / len(testdataloader.dataset),
               avg_ssim / len(testdataloader.dataset)))
    else:
        print('AVG NIQE : %.4f' % (avg_niqe / len(testdataloader.dataset)))
    print('Avg pred time per image: %.4f' %
          (avg_time / len(testdataloader.dataset)))
Example 12
def train(args):
    # define model
    one_scale = False

    if args.model_type == 'one_scale':
        my_model = model.OneScale(3,False)
        one_scale = True
    elif args.model_type == 'one_scale_lsc':
        my_model = model.OneScale(3,True)
        one_scale = True
    elif args.model_type == 'multi_scale':
        my_model = model.MultiScale(False)
    elif args.model_type == 'multi_scale_lsc':
        my_model = model.MultiScale(True)
    else:
        raise Exception("Model type is not supported: {}".format(args.model_type))

    my_model.apply(weights_init)
    no_params = no_of_parameters(my_model)

    save = SaveData(args)
    log = "Number of parameter {}".format(no_params)
    print(log)
    save.save_log(log)
    save.write_csv_header('mode','epoch','lr','batch_loss','time(min)','val_psnr','val_ssim')
    last_epoch = 0

    if args.multi:
        multi  = 1
        print("Using", torch.cuda.device_count(), "GPUs!")
        my_model = nn.DataParallel(my_model)

    my_model.cuda()
    cudnn.benchmark = True


    # resume model
    if args.finetuning:
        my_model, last_epoch = save.load_model(my_model)

    # dataloader
    dataloader = get_train_dataloader('GoPro', args)
    testdataloader = get_test_dataloader('GoPro', args)

    start_epoch = last_epoch

    # load function
    lossfunction = nn.MSELoss()
    lossfunction.cuda()

    lossfunction1 = nn.MSELoss()
    lossfunction1.cuda()

    def loss_multi_function(sharp1, sharp2, sharp3,
                            sharp_label_s1, sharp_label_s2, sharp_label_s3,
                            beta1=1, beta2=1):
        loss1 = lossfunction1(sharp1, sharp_label_s1)
        loss2 = lossfunction1(sharp2, sharp_label_s2)
        loss3 = lossfunction1(sharp3, sharp_label_s3)
        return (loss1 + beta1 * loss2 + beta2 * loss3) / 6

    # optimizer
    optimizer = optim.Adam(my_model.parameters(), lr=args.lr)
    lr_cheduler = LrScheduler(args.lr, 'inv', args.lrDecay)

    # log var
    avg_loss = AverageMeter()
    avg_time = AverageMeter()
    avg_time.reset()

    print("Begin train from epoch: {}".format(start_epoch))
    print("Batch len: {}".format(len(dataloader.dataset)))
    print("Test len: {}".format(len(testdataloader.dataset)))

    for epoch in range(start_epoch, args.epochs):
        start = time.time()
        # learning_rate = lr_cheduler.adjust_lr(epoch, optimizer)
        learning_rate = args.lr
        avg_loss.reset()
        for batch, images in enumerate(dataloader):
            blur_img_s1 = images['blur_image_s1']
            blur_img_s2 = images['blur_image_s2']
            blur_img_s3 = images['blur_image_s3']
            sharp_img_s1 = images['sharp_image_s1']
            sharp_img_s2 = images['sharp_image_s2']
            sharp_img_s3 = images['sharp_image_s3']

            blur_img_s1 = Variable(blur_img_s1.cuda())
            blur_img_s2 = Variable(blur_img_s2.cuda())
            blur_img_s3 = Variable(blur_img_s3.cuda())
            sharp_img_s1 = Variable(sharp_img_s1.cuda())
            sharp_img_s2 = Variable(sharp_img_s2.cuda())
            sharp_img_s3 = Variable(sharp_img_s3.cuda())

            my_model.zero_grad()
            if one_scale:
                output = my_model(blur_img_s1)
                loss = lossfunction(output, sharp_img_s1)
            else:
                sharp_s1,sharp_s2,sharp_s3 = my_model(blur_img_s1,blur_img_s2,blur_img_s3)
                loss = loss_multi_function(sharp_s1,sharp_s2,sharp_s3,sharp_img_s1,sharp_img_s2,sharp_img_s3)

            total_loss = loss
            total_loss.backward()
            optimizer.step()
            avg_loss.update(loss.data.item(), args.batchSize)
        end = time.time()
        epoch_time = (end - start)
        avg_time.update(epoch_time)
        log = "[{} / {}] \tLearning_rate: {:.5f} \tTotal_loss:{:.4f} \tAvg_loss: {:.4f} \tTotal_time: {:.4f} min \tBatch_time: {:.4f}".format(
            epoch + 1, args.epochs, learning_rate, avg_loss.sum(), avg_loss.avg(), avg_time.sum() / 60, avg_time.avg())
        print(log)
        save.save_log(log)
        save.log_csv('train',epoch+1,learning_rate,avg_loss.sum(),avg_time.sum()/60)
        if (epoch + 1) % args.period == 0:
            my_model.eval()
            avg_psnr, avg_ssim = test(my_model, testdataloader,one_scale)
            my_model.train()
            log = "*** [{} / {}] \tVal PSNR: {:.4f} \tVal SSIM: {:.4f} ".format(epoch + 1, args.epochs, avg_psnr,
                                                                                avg_ssim)
            print(log)
            save.save_log(log)
            save.log_csv('test', epoch + 1, learning_rate, avg_loss.sum(), avg_time.sum() / 60,avg_psnr,avg_ssim)
            save.save_model(my_model, epoch,avg_psnr)
Example 13
def test(args):
    one_scale = False

    if args.model_type == 'one_scale':
        my_model = model.OneScale(3, False)
        one_scale = True
    elif args.model_type == 'one_scale_lsc':
        my_model = model.OneScale(3, True)
        one_scale = True
    elif args.model_type == 'multi_scale':
        my_model = model.MultiScale(False)
    elif args.model_type == 'multi_scale_lsc':
        my_model = model.MultiScale(True)
    else:
        raise Exception("Model type is not supported: {}".format(
            args.model_type))

    my_model.apply(weights_init)
    my_model.cuda()

    my_model.load_state_dict(torch.load(args.pretrained_model))

    testdataloader = get_test_dataloader('GoPro', args)
    my_model.eval()

    avg_psnr = 0
    avg_ssim = 0
    avg_msssim = 0
    count = 0

    # make val folder
    if not os.path.isdir("val/%s/%s" % (args.model_type, args.saveDir)):
        os.makedirs("val/%s/%s" % (args.model_type, args.saveDir),
                    exist_ok=False)

    logFile = open("val/%s/%s" % (args.model_type, args.saveDir) + '/log.txt',
                   'w')

    for batch, images in enumerate(testdataloader):
        with torch.no_grad():
            blur_img_s1 = images['blur_image_s1']
            blur_img_s2 = images['blur_image_s2']
            blur_img_s3 = images['blur_image_s3']
            sharp_img_s1 = images['sharp_image_s1']
            sharp_img_s2 = images['sharp_image_s2']
            sharp_img_s3 = images['sharp_image_s3']

            blur_img_s1 = Variable(blur_img_s1.cuda())
            blur_img_s2 = Variable(blur_img_s2.cuda())
            blur_img_s3 = Variable(blur_img_s3.cuda())
            sharp_img_s1 = Variable(sharp_img_s1.cuda())
            sharp_img_s2 = Variable(sharp_img_s2.cuda())
            sharp_img_s3 = Variable(sharp_img_s3.cuda())
            if one_scale:
                output = my_model(blur_img_s1)
            else:
                output, _, _ = my_model(blur_img_s1, blur_img_s2, blur_img_s3)

        output = unnormalize(output[0])
        im_hr = unnormalize(sharp_img_s1[0])
        psnr, ssim = psnr_ssim_from_sci(output, im_hr)
        count = count + 1
        out = Image.fromarray(np.uint8(output), mode='RGB')
        out.save("val/%s/%s/DB_img_%03d.png" %
                 (args.model_type, args.saveDir, count))

        # =========== Target Image ===============
        psnr, ssim = psnr_ssim_from_sci(output,
                                        im_hr,
                                        padding=0,
                                        y_channels=False)
        msssim = MultiScaleSSIM(output[None], im_hr[None])

        log = '%d_img PSNR/SSIM/MS-SSIM: %.4f/%.4f/%.4f ' % (count, psnr, ssim,
                                                             msssim)
        print(log)
        logFile.write(log + '\n')
        logFile.flush()
        avg_ssim += ssim
        avg_psnr += psnr
        avg_msssim += msssim

    log = 'AVG PSNR/AVG SSIM/AVG MS-SSIM : %.4f/%.4f/%.4f ' % (
        avg_psnr / len(testdataloader.dataset), avg_ssim /
        len(testdataloader.dataset), avg_msssim / len(testdataloader.dataset))
    print(log)
    logFile.write(log + '\n')
    logFile.flush()
Example 14
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument('--test_phase', action='store_true', help="run in test phase and write submission results")
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weights repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    model = build_model(cfg, 0)
    model = model.cuda()
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))

    test_dataloader, num_query, dataset = get_test_dataloader(cfg, test_phase=True)


    if cfg.MODEL.NAME.endswith('abd'):
        target_theta = 0.45  # parameter for the abd network
    else:
        target_theta = 0.95  # parameter for non-abd networks

    target_theta = 0.45  # overrides the per-model choice above

    # thetas = [0.45, 0.5, 0.9, 0.95]
    thetas = [target_theta]

    use_flip = True

    if use_flip:
        scores, indices, dist_mats = inference_flipped(cfg, model, test_dataloader, num_query, thetas)
    else:
        scores, indices, dist_mats = inference(cfg, model, test_dataloader, num_query, thetas)

    print('distmats', len(dist_mats), 'thetas', len(thetas))


    # saving results
    if args.test_phase:
        query_path = [t[0] for t in dataset.query]
        gallery_path = [t[0] for t in dataset.gallery]
        logger.info("-------------Write resutls to json file----------")

        for idx, (theta, score, index) in enumerate(zip(thetas, scores, indices)):
            results = {}
            top_k = 200
            for i in range(len(query_path)):
                topk_res = []
                for j in range(top_k):
                    img_path = gallery_path[index[i, j]]
                    # print(img_path)
                    topk_res.append(img_path.split('/')[-1].split('_')[-1])
                results[query_path[i].split('/')[-1].split('_')[-1]] = topk_res

            # write results
            strtime = time.strftime("%Y%m%d_%H%M%S", time.localtime())
            if use_flip:
                json.dump(results, open('submit/reid_%s_%s_(r, t %.3f, flip).json' % (cfg.MODEL.NAME, strtime, theta), 'w'))
            else:
                json.dump(results, open('submit/reid_%s_%s_(r, t %.3f).json' % (cfg.MODEL.NAME, strtime, theta), 'w'))


            if abs(theta - target_theta) < 1e-4:
                # saving dist_mats
                f = h5py.File('dist_mats/test_%s_%s_t_%.2f_flip.h5' % (cfg.MODEL.NAME, strtime, theta), 'w')
                f.create_dataset('dist_mat', data=dist_mats[idx], compression='gzip')
                f.close()
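Several of these examples persist distance matrices with h5py and gzip compression. A quick round-trip sketch (the random matrix and local file name are placeholders) confirming that a dataset written this way reads back unchanged:

import h5py
import numpy as np

dist_mat = np.random.rand(100, 200).astype(np.float32)  # made-up shape
with h5py.File('example_dist_mat.h5', 'w') as f:
    f.create_dataset('dist_mat', data=dist_mat, compression='gzip')

with h5py.File('example_dist_mat.h5', 'r') as f:
    restored = f['dist_mat'][()]

assert np.allclose(dist_mat, restored)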