Exemple #1
0
def main():
    """Entry point: parse CLI options, build the frozen config, run ReID
    inference on the validation set, and write track-aware ranking results
    next to the model weights.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file",
        type=str,
        default="/media/bi/Data/Mycode/car_demo/ai_city/configs/submit.yml",
        help="path to config file")
    parser.add_argument("opts",
                        nargs=argparse.REMAINDER,
                        default=None,
                        help="Modify config options using the command-line")
    args = parser.parse_args()

    # Distributed launchers export WORLD_SIZE; fall back to a single GPU.
    num_gpus = int(os.environ.get("WORLD_SIZE", "1"))

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        # Echo the raw YAML so the log is self-contained.
        with open(args.config_file, 'r') as cf:
            logger.info("\n" + cf.read())
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    loaders = make_data_loader(cfg)
    train_loader, val_loader, num_query, num_classes, dataset = loaders
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    # Re-read the dataset meta information (test tracks) directly from disk;
    # this intentionally replaces the dataset handle from make_data_loader.
    dataset = VeRi(cfg.DATASETS.ROOT_DIR)
    print("test_tracks", dataset.test_tracks)
    indices_np = inference(cfg, model, val_loader, num_query, dataset)

    write_result_with_track(indices_np, os.path.dirname(cfg.TEST.WEIGHT),
                            dataset.test_tracks)
Exemple #2
0
def main():
    """Entry point: parse CLI options, build the frozen config, extract ReID
    features for the validation set, and save them to ``feats_extract.npy``.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        type=str,
                        default="./configs/debug.yml",
                        help="path to config file")
    parser.add_argument("opts",
                        nargs=argparse.REMAINDER,
                        default=None,
                        help="Modify config options using the command-line")
    args = parser.parse_args()

    # Distributed launchers export WORLD_SIZE; fall back to a single GPU.
    num_gpus = int(os.environ.get("WORLD_SIZE", "1"))

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    loaders = make_data_loader(cfg)
    train_loader, val_loader, num_query, num_classes, dataset = loaders
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    imgs, feats = inference_to_get_feats(cfg, model, val_loader, num_query,
                                         dataset)

    # Persist the feature matrix for offline analysis and preview image ids.
    import numpy as np
    np.save('feats_extract.npy', feats.cpu().detach().numpy())
    print(imgs[:50])
def main():
    """Entry point: parse CLI options, build the frozen config, and export
    activation-map crop detections for both data splits as one JSON file.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        type=str,
                        default="./configs/debug.yml",
                        help="path to config file")
    parser.add_argument("opts",
                        nargs=argparse.REMAINDER,
                        default=None,
                        help="Modify config options using the command-line")
    args = parser.parse_args()

    # Distributed launchers export WORLD_SIZE; fall back to a single GPU.
    num_gpus = int(os.environ.get("WORLD_SIZE", "1"))

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    loaders = make_data_loader(cfg)
    train_loader, val_loader, num_query, num_classes, dataset = loaders
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    # Collect crop detections from the train split first, then validation.
    results = []
    out_dir = os.path.join('AIC_crop')
    for loader in (train_loader, val_loader):
        results.extend(vis_actmap(model, cfg, loader, out_dir))

    with open(os.path.join(out_dir, 'detection.json'), 'w') as f:
        json.dump(results, f)
Exemple #4
0
def main():
    """Entry point: parse CLI options, assemble the frozen training config,
    and launch ReID baseline training.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument("--config_file",
                        type=str,
                        default="data/veri.yml",
                        help="path to config file")
    parser.add_argument("opts",
                        nargs=argparse.REMAINDER,
                        default=None,
                        help="Modify config options using the command-line")
    args = parser.parse_args()

    # Distributed launchers export WORLD_SIZE; fall back to a single GPU.
    num_gpus = int(os.environ.get("WORLD_SIZE", "1"))

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        # Restrict visible GPUs to the configured device ids.  # new add by gu
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True
    train(cfg)
Exemple #5
0
from paddle import amp
from paddle.static import InputSpec

from lib.utils.workspace import create
from lib.utils.checkpoint import load_weight, load_pretrain_weight
from lib.utils.visualizer import visualize_results, save_result
from lib.metrics.coco_utils import get_infer_results
from lib.metrics import KeyPointTopDownCOCOEval
from lib.dataset.category import get_categories
import lib.utils.stats as stats

from .callbacks import Callback, ComposeCallback, LogPrinter, Checkpointer, VisualDLWriter
from .export_utils import _dump_infer_config, _prune_input_spec

from lib.utils.logger import setup_logger
logger = setup_logger('hrnet.pose')

__all__ = ['Trainer']


class Trainer(object):
    """Holds the config and run mode for a training/eval/test session.

    Distillation slots start empty and are filled later if configured.
    """

    def __init__(self, cfg, mode='train'):
        normalized_mode = mode.lower()
        # Reject anything outside the three supported run modes early.
        assert normalized_mode in ['train', 'eval', 'test'], \
                "mode should be 'train', 'eval' or 'test'"
        self.cfg = cfg
        self.mode = normalized_mode
        self.optimizer = None

        # init distillation config
        self.distill_model = None
        self.distill_loss = None
Exemple #6
0
    logger.info("Start inference")
    inference(model, test_loader, logger, cfg)
    logger.info("Inference Over\n")


if __name__ == "__main__":
    # CLI: positional config path and GPU count, optional pretrained weights.
    parser = argparse.ArgumentParser(description="TSI")
    parser.add_argument("config",
                        metavar="FILE",
                        type=str,
                        help="path to config file")
    parser.add_argument("num_gpus", type=int)
    parser.add_argument("--pretrain", type=str, default="None")
    args = parser.parse_args()

    # load settings; CLI values override fields of the parsed config
    cfg = load_config(args.config)
    cfg.num_gpus = args.num_gpus
    cfg.pretrain = args.pretrain
    output_dir = create_infer_folder(cfg)
    set_seed(2021)  # fixed seed for reproducible inference

    # setup logger
    logger = setup_logger("infer", output_dir)
    logger.info("Using {} GPUs".format(cfg.num_gpus))
    logger.info(cfg)

    # start inference
    test(cfg, logger)
Exemple #7
0
import six
import sys
if sys.version_info >= (3, 0):
    pass
else:
    pass
import numpy as np

from paddle.io import DataLoader, DistributedBatchSampler
from paddle.fluid.dataloader.collate import default_collate_fn

from lib.utils.workspace import register
from . import transform

from lib.utils.logger import setup_logger
logger = setup_logger('reader')

MAIN_PID = os.getpid()

__all__ = [
    'Compose', 'BatchCompose', 'BaseDataLoader', 'TrainReader', 'EvalReader',
    'TestReader'
]


class Compose(object):
    def __init__(self, transforms, num_classes=80):
        self.transforms = transforms
        self.transforms_cls = []
        for t in self.transforms:
            for k, v in t.items():
Exemple #8
0
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
sys.path.insert(0, parent_path)

# ignore warning log
import warnings
warnings.filterwarnings('ignore')
import glob

import paddle
from lib.utils.workspace import load_config, merge_config
from lib.slim import build_slim_model
from lib.core.trainer import Trainer
from lib.utils.check import check_gpu, check_version, check_config
from lib.utils.cli import ArgsParser
from lib.utils.logger import setup_logger
logger = setup_logger('train')


def parse_args():
    parser = ArgsParser()
    parser.add_argument("--infer_dir",
                        type=str,
                        default=None,
                        help="Directory for images to perform inference on.")
    parser.add_argument(
        "--infer_img",
        type=str,
        default=None,
        help="Image path, has higher priority over --infer_dir")
    parser.add_argument(
        "--output_dir",
Exemple #9
0
import os
import sys
import datetime
import six
import copy
import json

import paddle
import paddle.distributed as dist

from lib.utils.checkpoint import save_model
from lib.metrics.coco_utils import get_infer_results

from lib.utils.logger import setup_logger
logger = setup_logger('hrnet')

__all__ = [
    'Callback', 'ComposeCallback', 'LogPrinter', 'Checkpointer',
    'VisualDLWriter'
]


class Callback(object):
    """Base class for training hooks; subclasses override the on_* methods."""

    def __init__(self, model):
        # Keep a reference to the model the trainer is running.
        self.model = model

    def on_step_begin(self, status):
        # Hook invoked before each step; default is a no-op.
        pass

    def on_step_end(self, status):
Exemple #10
0
# ignore warning log
import warnings

warnings.filterwarnings('ignore')
import glob

import paddle
from lib.utils.workspace import load_config, merge_config
from lib.slim import build_slim_model
from lib.core.trainer import Trainer
from lib.utils.check import check_gpu, check_version, check_config
from lib.utils.cli import ArgsParser
from lib.utils.logger import setup_logger

logger = setup_logger('eval')


def parse_args():
    """Build the evaluation CLI parser (adds --save-inference-dir) and
    return the parsed arguments.
    """
    parser = ArgsParser()
    parser.add_argument(
        "--save-inference-dir",
        type=str,
        default='output_inference',
        help="Evaluation directory, default is current directory.")
    return parser.parse_args()


def main():
Exemple #11
0
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import numpy as np
import itertools

from .json_results import get_det_res, get_det_poly_res, get_seg_res, get_solov2_segm_res, get_keypoint_res
from .map_utils import draw_pr_curve

from lib.utils.logger import setup_logger
logger = setup_logger(__name__)

__all__ = ['get_infer_results', 'cocoapi_eval', 'json_eval_results']


def get_infer_results(outs, catid, bias=0):
    """
    Get result at the stage of inference.
    The output format is dictionary containing bbox or mask result.

    For example, bbox result is a list and each element contains
    image_id, category_id, bbox and score.
    """
    if outs is None or len(outs) == 0:
        raise ValueError(
            'The number of valid detection result if zero. Please use reasonable model and check input data.'
Exemple #12
0
        do_train(model, criterion, train_loader, logger, cfg, train_meters, optimizer)
        do_train(model, criterion, valid_loader, logger, cfg, valid_meters, test=True)
        scheduler.step()
    print_meter(logger, train_meters, valid_meters)

    logger.info("Train over. Best_epoch is {}\n".format(cfg.best_epoch))


if __name__ == "__main__":
    # CLI: positional config path and GPU count.
    parser = argparse.ArgumentParser(description="TSI")
    parser.add_argument("config", metavar="FILE", type=str, help="path to config file")
    parser.add_argument("num_gpus", type=int)
    args = parser.parse_args()

    # load settings; CLI GPU count overrides the parsed config
    cfg = load_config(args.config)
    cfg.num_gpus = args.num_gpus
    set_seed(2021)  # fixed seed for reproducible training

    # create work folder
    output_dir = create_folder(cfg)

    # setup logger and save config
    logger = setup_logger("train", output_dir)
    logger.info("Using {} GPUs".format(cfg.num_gpus))
    logger.info(cfg)
    save_config(args.config, cfg.EXP_NAME)

    # start training
    train(cfg, logger)
Exemple #13
0
                    help="Modify config options using the command-line",
                    default=None,
                    nargs=argparse.REMAINDER)
args = parser.parse_args()

# Overlay the YAML file and CLI overrides onto the default config, then lock it.
if args.config_file != "":
    cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()

output_dir = cfg.OUTPUT_DIR
if output_dir and not os.path.exists(output_dir):
    mkdir(output_dir)

# WORLD_SIZE is exported by distributed launchers; default to a single GPU.
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
logger = setup_logger("reid_baseline", output_dir, 0)
logger.info("Using {} GPUS".format(num_gpus))
# logger.info(args)

if args.config_file != "":
    logger.info("Loaded configuration file {}".format(
        args.config_file))  # e.g. --config_file='/code/configs/aicity20.yml'
    # Echo the raw YAML so the log is self-contained.
    with open(args.config_file, 'r') as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
# logger.info("Running with config:\n{}".format(cfg))


def main():
    global args
    if not args.use_avai_gpus:
Exemple #14
0
def main():
    """Crop-extraction entry point: load a ReID model, run activation-map
    cropping over the train and query/gallery loaders, and pickle the crop
    lists under ``args.crop_path``.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="./configs/submit.yml", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    # NOTE(review): the help strings below say "path to config file" but these
    # are the pretrained-weight path and crop output directory.
    parser.add_argument(
        "--pretrain_model_path", default="./dianshang/crop_half_model.pth", help="path to config file", type=str
    )

    parser.add_argument(
        "--crop_path", default=" ", help="path to config file", type=str
    )

    args = parser.parse_args()
    # WORLD_SIZE is exported by distributed launchers; default to a single GPU.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    # The CLI weight path overrides whatever the config file specifies.
    cfg.TEST.WEIGHT = args.pretrain_model_path

    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))



    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    # shuffle_train=False keeps crop order aligned with the source images.
    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(cfg,shuffle_train= False)
    model = build_model(cfg, num_classes)
    print("load pretained weight",cfg.TEST.WEIGHT)
    # try:
    print("加载单卡训练权重")  # message: "loading single-GPU-trained weights"
    model.load_param(cfg.TEST.WEIGHT)


    # NOTE(review): `results` is never used below — looks like leftover code.
    results = []
    out_dir = args.crop_path
    # NOTE(review): only crop_train existence is checked; if it exists while
    # crop_query/crop_gallery are missing, the later writes will fail — confirm.
    if os.path.exists(os.path.join(out_dir,'crop_train')):
        print("文件夹已存在")  # message: "folder already exists"
    else:
        os.makedirs(os.path.join(out_dir,'crop_train'))
        os.makedirs(os.path.join(out_dir,'crop_query'))
        os.makedirs(os.path.join(out_dir,'crop_gallery'))

    # First pass crops the training split; second pass crops query/gallery.
    crop_train,crop_query,crop_gallery= vis_actmap(model, cfg, train_loader, out_dir)
    pickle.dump(crop_train, open(os.path.join(out_dir, 'train_crop_img_add.pkl'), 'wb'))
    crop_train,crop_query,crop_gallery= vis_actmap(model, cfg, val_loader, out_dir)
    pickle.dump((crop_query, crop_gallery), open(os.path.join(out_dir, 'test_crop_img_add.pkl'), 'wb'))