Example #1
0
def train(cfg):
    """Training entry point: build every component from cfg and hand off to do_train."""
    logger = setup_logger(name='Train', level=cfg.LOGGER.LEVEL)
    logger.info(cfg)

    # Model and its target device.
    model = build_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)

    # Loss, optimizer and learning-rate schedule.
    criterion = build_loss(cfg)
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    # Train/validation pipelines; log the dataset summaries.
    train_loader = build_data(cfg, is_train=True)
    val_loader = build_data(cfg, is_train=False)
    logger.info(train_loader.dataset)
    logger.info(val_loader.dataset)

    # Mutable loop state shared with do_train (resumed iteration counter).
    arguments = {"iteration": 0}

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    checkpointer = Checkpointer(model, optimizer, scheduler, cfg.SAVE_DIR)

    do_train(cfg, model, train_loader, val_loader, optimizer, scheduler,
             criterion, checkpointer, device, checkpoint_period, arguments,
             logger)
Example #2
0
def train(cfg):
    """Training entry point with multi-GPU support and TensorBoard logging.

    Builds model/loss/optimizer/data from cfg, wraps the model in
    DataParallel when more than one GPU is visible, prepares checkpoint and
    TensorBoard directories, then delegates the loop to do_train.

    Args:
        cfg: project config node (yacs-style) read for all component settings.
    """
    logger = setup_logger(name="Train", level=cfg.LOGGER.LEVEL)
    logger.info(cfg)
    model = build_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    # BUG FIX: the original tested len() of the raw env string, so a single
    # two-digit GPU id (e.g. "10") would wrongly enable DataParallel while
    # "0" would not. Count comma-separated device ids instead, matching the
    # sibling extract() helper in this file.
    num_gpus = len(os.environ["CUDA_VISIBLE_DEVICES"].split(","))
    if num_gpus > 1:
        model = torch.nn.DataParallel(model)

    criterion = build_loss(cfg)

    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    train_loader = build_data(cfg, is_train=True)
    val_loader = build_data(cfg, is_train=False)

    logger.info(train_loader.dataset)
    # build_data returns a sequence of loaders for the eval side here.
    for x in val_loader:
        logger.info(x.dataset)

    # Mutable loop state shared with do_train (resumed iteration counter).
    arguments = dict()
    arguments["iteration"] = 0

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    ckp_save_path = os.path.join(cfg.SAVE_DIR, cfg.NAME)

    os.makedirs(ckp_save_path, exist_ok=True)
    checkpointer = Checkpointer(model, optimizer, scheduler, ckp_save_path)

    tb_save_path = os.path.join(cfg.TB_SAVE_DIR, cfg.NAME)
    os.makedirs(tb_save_path, exist_ok=True)
    writer = SummaryWriter(tb_save_path)

    do_train(
        cfg,
        model,
        train_loader,
        val_loader,
        optimizer,
        scheduler,
        criterion,
        checkpointer,
        writer,
        device,
        checkpoint_period,
        arguments,
        logger,
    )
Example #3
0
def test(cfg):
    """Evaluate a serialized model on the validation split and log Recall@1."""
    logger = setup_logger(name="Test", level=cfg.LOGGER.LEVEL)
    logger.info(cfg)

    # NOTE(review): torch.load unpickles arbitrary code — only load trusted files.
    model = torch.load('ms_loss_net_car_5000.pkl')
    model.eval()
    logger.info("Validation")

    val_loader = build_data(cfg, is_train=False)

    # Ground-truth labels as an int array; features from the shared extractor.
    labels = np.array([int(k) for k in val_loader.dataset.label_list])
    feats = feat_extractor(model, val_loader, logger=logger)

    ret_metric = RetMetric(feats=feats, labels=labels)
    recall_curr = ret_metric.recall_k(1)
    logger.info(f'Recall@1 : {recall_curr:.3f}')
Example #4
0
def test(cfg):
    """Evaluate a serialized model and log retrieval metrics (P@1, R-precision, mAP@r).

    Loads the whole pickled model from 'net.pkl', extracts features for the
    first validation loader, and scores them with AccuracyCalculator using
    the feature set as both query and reference (same-source evaluation).
    """
    # FIX: the logger was named "Train" inside this test-time entry point;
    # use "Test" for consistency with the sibling test() example.
    logger = setup_logger(name="Test", level=cfg.LOGGER.LEVEL)
    logger.info(cfg)
    # NOTE(review): torch.load unpickles arbitrary code — only load trusted files.
    model = torch.load('net.pkl')
    model.eval()
    logger.info("Validation")

    val_loader = build_data(cfg, is_train=False)

    # build_data returns a sequence of loaders here; evaluate on the first.
    labels = val_loader[0].dataset.label_list
    labels = np.array([int(k) for k in labels])
    feats = feat_extractor(model, val_loader[0], logger=logger)
    ret_metric = AccuracyCalculator(
        include=("precision_at_1", "mean_average_precision_at_r", "r_precision"),
        exclude=(),
    )
    ret_metric = ret_metric.get_accuracy(feats, feats, labels, labels, True)
    # (removed the unused `mapr_curr` local; the full dict is logged below)

    logger.info(f"ret_metric: {ret_metric}")
Example #5
0
def train(cfg):
    """Build every training component from cfg and launch do_train.

    Supports an optional gallery split: when cfg.DATA.TEST_GALLERY_IMG_SOURCE
    is set, a gallery loader is built and forwarded to do_train; otherwise
    gallery_loader stays None.
    """
    logger = setup_logger(name="Train", level=cfg.LOGGER.LEVEL)
    logger.info(cfg)

    model = build_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)

    criterion = build_loss(cfg)
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    # Explicit image sources: training split plus a query split for eval.
    train_loader = build_data(cfg, cfg.DATA.TRAIN_IMG_SOURCE, is_train=True)
    query_loader = build_data(cfg, cfg.DATA.TEST_QUERY_IMG_SOURCE, is_train=False)
    logger.info(train_loader.dataset)
    logger.info(query_loader.dataset)

    gallery_loader = None
    if cfg.DATA.TEST_GALLERY_IMG_SOURCE:
        gallery_loader = build_data(
            cfg, cfg.DATA.TEST_GALLERY_IMG_SOURCE, is_train=False)
        logger.info(gallery_loader.dataset)

    # Mutable loop state shared with do_train (resumed iteration counter).
    arguments = {"iteration": 0}

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    checkpointer = Checkpointer(model, optimizer, scheduler, cfg.SAVE_DIR)

    do_train(cfg, model, train_loader, query_loader, optimizer, scheduler,
             criterion, checkpointer, device, checkpoint_period, arguments,
             logger, gallery_loader=gallery_loader)
Example #6
0
def extract(cfg, img_source, model_path=None):
    """Extract features for every image in img_source and save them to an .npz.

    Args:
        cfg: project config node.
        img_source: image list/source consumed by BaseDataSet.
        model_path: optional checkpoint path; when given, its "model"
            state_dict is loaded strictly into the freshly built model.

    Side effects:
        Writes output/<YYYY-MM-DD>_feat.npz with arrays `feat` and `upc`.
    """
    logger = setup_logger(name="Feat", level=cfg.LOGGER.LEVEL)

    model = build_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)

    if model_path is not None:
        print(f"load model {model_path} .......")
        model_dict = torch.load(model_path)["model"]
        model.load_state_dict(model_dict, strict=True)
        print("model successfully loaded")

    # Scale batch size (and DataParallel wrapping) with the visible GPU count.
    num_gpus = len(os.environ["CUDA_VISIBLE_DEVICES"].split(","))
    if num_gpus > 1:
        model = torch.nn.DataParallel(model)

    transforms = build_transforms(cfg, is_train=False)

    dataset = BaseDataSet(img_source,
                          False,
                          transforms=transforms,
                          mode=cfg.INPUT.MODE)
    data_loader = DataLoader(
        dataset,
        collate_fn=collate_fn,
        shuffle=False,
        batch_size=cfg.DATA.TEST_BATCHSIZE * num_gpus,
        num_workers=cfg.DATA.NUM_WORKERS,
        pin_memory=False,
    )

    labels = dataset.label_list
    feats = feat_extractor(model, data_loader, logger)

    day_time = time.strftime("%Y-%m-%d", time.localtime())
    # BUG FIX: np.savez raises FileNotFoundError when the target directory
    # is missing; create it first (same pattern as the train() examples).
    os.makedirs("output", exist_ok=True)
    npz_path = f"output/{day_time}_feat.npz"
    np.savez(npz_path, feat=feats, upc=labels)
    print(f"FEATS : \t {npz_path}")
def train(cfg):
    """Training entry point with class-count-aware loss, optional center loss,
    an optional XBM loss, and a train-validation (trainVal) split.

    Side effects: mutates cfg (sets cfg.num_classes) and creates checkpoint /
    TensorBoard directories on disk.
    """
    logger = setup_logger(name="Train", level=cfg.LOGGER.LEVEL)
    logger.info(cfg)
    # Infer the number of classes from the training labels (max id + 1).
    train_loader = build_data(cfg, is_train=True)
    num_classes = max(set([int(i)
                           for i in train_loader.dataset.label_list])) + 1
    cfg.num_classes = num_classes
    criterion = build_loss(cfg.LOSSES.NAME, num_classes, cfg)
    # NOTE(review): the train loader is rebuilt here after cfg.num_classes is
    # set — presumably build_data depends on it; confirm before deduplicating.
    train_loader = build_data(cfg, is_train=True)
    model = build_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)

    # Center-loss-style criteria come back as (criterion, center optimizer);
    # give the center optimizer its own LR schedule.
    if isinstance(criterion, tuple):
        criterion, optimizer_center = criterion
        criterion = criterion.cuda()
        scheduler_center = build_lr_scheduler(cfg, optimizer_center)
    else:
        optimizer_center = None
        scheduler_center = None
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    # Eval loaders (a sequence), plus a trainVal loader built from the first
    # validation dataset.
    val_loader = build_data(cfg, is_train=False)

    trainVal_loader = build_trainVal_data(cfg, val_loader[0].dataset)

    # Separate loss for the XBM memory bank unless configured as 'same'.
    if cfg.LOSSES.NAME_XBM_LOSS != 'same':
        criterion_xbm = build_loss(cfg.LOSSES.NAME_XBM_LOSS, num_classes, cfg)
    else:
        criterion_xbm = None

    logger.info(train_loader.dataset)
    logger.info(trainVal_loader.dataset)
    for x in val_loader:
        logger.info(x.dataset)

    # Mutable loop state shared with do_train (resumed iteration counter).
    arguments = dict()
    arguments["iteration"] = 0

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    ckp_save_path = os.path.join(cfg.SAVE_DIR, cfg.NAME)
    os.makedirs(ckp_save_path, exist_ok=True)
    checkpointer = Checkpointer(model, optimizer, scheduler, ckp_save_path)

    tb_save_path = os.path.join(cfg.TB_SAVE_DIR, cfg.NAME)
    os.makedirs(tb_save_path, exist_ok=True)
    writer = SummaryWriter(tb_save_path)

    do_train(
        cfg,
        model,
        train_loader,
        trainVal_loader,
        val_loader,
        optimizer,
        optimizer_center,
        scheduler,
        scheduler_center,
        criterion,
        criterion_xbm,
        checkpointer,
        writer,
        device,
        arguments,
        logger,
    )
Example #8
0
import os

from collections import OrderedDict

import torch
from torch.nn.modules import Sequential

from modeling.registry import HEADS

from modeling.registry import BACKBONES

from .bninception import BNInception
from .resnet import ResNet50

# Pin execution to GPU 1 before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# NOTE(review): the following module-level statements look like scraping
# residue — `setup_logger`, `cfg`, `backbone` and `head` are not defined at
# import time in this file, so executing them as-is would raise NameError.
# The same Sequential construction appears inside build_model() below.
logger = setup_logger(name='Train', level=cfg.LOGGER.LEVEL)
logger.info(cfg)

model = Sequential(OrderedDict([('backbone', backbone), ('head', head)]))


def build_model(cfg):
    backbone = build_backbone(cfg)
    head = build_head(cfg)

    model = Sequential(OrderedDict([('backbone', backbone), ('head', head)]))

    if cfg.MODEL.PRETRAIN == 'imagenet':
        print('Loading imagenet pretrianed model ...')
        pretrained_path = os.path.expanduser(
            cfg.MODEL.PRETRIANED_PATH[cfg.MODEL.BACKBONE.NAME])