Example #1
def test(val_loader, num_query, query_name, gallery_name, num_classes):
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
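    # "opts" uses argparse.REMAINDER to gather any trailing "KEY VALUE" tokens,
    # which merge_from_list applies as config overrides below.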
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, if_train=False)
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
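    # Note: CUDA_VISIBLE_DEVICES only takes effect if set before the first CUDA call.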

    train_model_path = os.path.join(
        cfg.OUTPUT_DIR, cfg.MODEL.NAME + '_{}.pth'.format(cfg.TEST.WEIGHT))
    model = make_model(cfg, num_class=num_classes)
    model.load_param(train_model_path)

    do_inference(cfg, model, val_loader, num_query, query_name, gallery_name)
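The --config_file plus trailing opts pattern above is the standard yacs workflow: settings from a YAML file first, then "KEY VALUE" overrides from the command line. A minimal, self-contained sketch with a toy CfgNode (not this project's cfg) shows what merge_from_list does with those tokens:

from yacs.config import CfgNode as CN

cfg = CN()
cfg.TEST = CN()
cfg.TEST.WEIGHT = 60
# Equivalent of appending "TEST.WEIGHT 120" after the script name:
cfg.merge_from_list(['TEST.WEIGHT', '120'])
cfg.freeze()             # the config is read-only from here on
print(cfg.TEST.WEIGHT)   # -> 120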
Example #2
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID


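    # When flip-feature TTA is off, evaluate six views: the full image plus
    # center and four corner crops.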
    if cfg.TEST.FLIP_FEATS != 'on':
        (train_loader, val_loader, num_query_normal, num_classes,
         val_loader_center, val_loader_lb, val_loader_rb,
         val_loader_rt, val_loader_lt) = make_dataloader(cfg)
        val_loader_normal = [val_loader, val_loader_center, val_loader_lt,
                             val_loader_rt, val_loader_lb, val_loader_rb]
        model = make_model(cfg, num_class=num_classes)
        model.load_param(cfg.TEST.WEIGHT)

        do_inference_multi(cfg,
                           model,
                           val_loader_normal,
                           num_query_normal)

    else:
        train_loader, val_loader_normal, num_query_normal, num_classes = make_dataloader(cfg)
        model = make_model(cfg, num_class=num_classes)
        model.load_param(cfg.TEST.WEIGHT)

        do_inference(cfg,
                     model,
                     val_loader_normal,
                     num_query_normal)
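do_inference_multi is not shown here; a minimal sketch of how per-view features are commonly fused (L2-normalize each view, then average), assuming a hypothetical extract_feat(model, loader) helper:

import torch
import torch.nn.functional as F

def fuse_multiview_feats(model, loaders, extract_feat):
    # extract_feat(model, loader) -> Tensor[N, D] is a hypothetical helper;
    # each loader yields the same N images under a different crop or flip.
    views = [F.normalize(extract_feat(model, ld), dim=1) for ld in loaders]
    return torch.stack(views).mean(dim=0)  # [N, D] fused embeddings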
Example #3
import os
import sys
from config.cfg import Cfg
import torch
from torch.backends import cudnn

sys.path.append('.')
from datasets import make_dataloader
from processor import do_inference
from model import make_model
from utils.logger import setup_logger

if __name__ == "__main__":
    Cfg.freeze()
    log_dir = Cfg.DATALOADER.LOG_DIR
    logger = setup_logger('pose-transfer-avs.test', log_dir)
    logger.info("Running with config:\n{}".format(Cfg))

    os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader = make_dataloader(Cfg)
    model_G, _, _ = make_model(Cfg)
    model_G.load_state_dict(torch.load(Cfg.TEST.WEIGHT))

    do_inference(Cfg, model_G, val_loader)
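One caveat with this loading style: torch.load without map_location fails if the checkpoint was saved on a GPU that is unavailable at test time. A defensive variant (an assumption, not necessarily what this project does):

state_dict = torch.load(Cfg.TEST.WEIGHT, map_location='cpu')  # deserialize onto CPU first
model_G.load_state_dict(state_dict)
model_G.to('cuda' if torch.cuda.is_available() else 'cpu')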
Example #4
File: test.py  Project: Codsir/huawei2020
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID

    #train_loader, val_loader_green, val_loader_normal, num_query_green,num_query_normal, num_classes = make_dataloader(cfg)
    model = make_model(cfg, num_class=3094)
    model.load_param(cfg.TEST.WEIGHT)

    transform_test = transforms.Compose([
        transforms.Resize(cfg.INPUT.SIZE_TEST),
        # crop the central 224x224 square region of the image
        # transforms.CenterCrop(224),
        transforms.ToTensor(),
        # transforms.Normalize((0.524, 0.4943, 0.473), (0.03477, 0.03015, 0.02478))
        transforms.Normalize(mean=cfg.INPUT.PIXEL_MEAN,
                             std=cfg.INPUT.PIXEL_STD)
    ])

    batch_size = 128
    testset = torchvision.datasets.ImageFolder(
        root='../test_data_final/test_data_A', transform=transform_test)
    test_loader = torch.utils.data.DataLoader(testset,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=4,
                                              pin_memory=True)

    num_query = len(os.listdir(os.path.join("../test_data_final/test_data_A", "query")))
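    # ImageFolder sorts class folders alphabetically; this split assumes the
    # "query" folder sorts last, so its samples occupy the tail of testset.samples.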
    query_name = testset.samples[-num_query:]
    #print(query_name[0])
    gallery_name = testset.samples[:-num_query]
    do_inference(cfg, model, test_loader, num_query, query_name, gallery_name)
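The query/gallery split above feeds a retrieval step. do_inference is not shown on this page, but a minimal sketch of the cosine-similarity ranking it typically performs (the names below are assumptions):

import torch
import torch.nn.functional as F

def rank_gallery(query_feats, gallery_feats, topk=10):
    # Normalize both sets, then a single matmul yields the full
    # query x gallery similarity matrix.
    q = F.normalize(query_feats, dim=1)
    g = F.normalize(gallery_feats, dim=1)
    sim = q @ g.t()                       # [num_query, num_gallery]
    return sim.topk(topk, dim=1).indices  # top-k gallery indices per query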
Example #5
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, if_train=False)
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID

    train_loader, val_loader, num_query, num_classes = make_dataloader(cfg)
    model = make_model(cfg, num_class=num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    do_inference(cfg, model, val_loader, num_query)
Example #6
import os
import sys
from config.config import Configuration
import torch
from torch.backends import cudnn

sys.path.append('.')
from datasets import make_dataloader
from processor import do_inference
from model import make_model
from utils.logger import setup_logger

if __name__ == "__main__":
    Cfg = Configuration()
    log_dir = Cfg.LOG_DIR
    logger = setup_logger('{}.test'.format(Cfg.PROJECT_NAME), log_dir)

    os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.DEVICE_ID
    cudnn.benchmark = True
    # cudnn.benchmark enables the cuDNN auto-tuner, which picks the fastest convolution algorithms for this hardware.

    train_loader, test_loader = make_dataloader(Cfg)
    model = make_model(Cfg)
    model.load_state_dict(torch.load(Cfg.TEST_WEIGHT))

    do_inference(Cfg, model, test_loader)
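For orientation, since do_inference itself is not shown in any of these examples, here is a minimal sketch of what such a feature-extraction pass usually looks like (all names are assumptions):

import torch

@torch.no_grad()
def run_inference(model, loader, device='cuda'):
    model.to(device).eval()     # disable dropout and batch-norm updates
    feats = []
    for imgs, _ in loader:      # assumes the loader yields (image, label) batches
        feats.append(model(imgs.to(device)).cpu())
    return torch.cat(feats)     # [N, D] feature matrix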