Example #1
import pprint

from model.utils.config import cfg, cfg_from_file, cfg_from_list
from model.utils.parser_func import parse_args, set_dataset_args


def get_cfg():
    args = parse_args()

    print("Called with args:")
    print(args)
    args = set_dataset_args(args)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print("Using config:")
    pprint.pprint(cfg)
    # np.random.seed(cfg.RNG_SEED)
    setup_seed(cfg.RNG_SEED)
    return args
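
`setup_seed` is not defined in this snippet; judging from the commented-out `np.random.seed` call it replaces, it is presumably a project helper that seeds every RNG in one place. A minimal sketch of such a helper (the exact body is an assumption, not the project's code):

import random

import numpy as np
import torch

def setup_seed(seed):
    # Hypothetical helper: seed all RNGs the training loop touches.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True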
Example #2
import pprint

import numpy as np
import torch
import torch.nn as nn
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.utils.net_utils import weights_normal_init, save_net, load_net, \
    adjust_learning_rate, save_checkpoint, clip_gradient, FocalLoss, sampler, calc_supp, EFocalLoss

from model.utils.parser_func import parse_args, set_dataset_args

if __name__ == '__main__':

    args = parse_args()

    print('Called with args:')
    print(args)
    args = set_dataset_args(args)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)
    np.random.seed(cfg.RNG_SEED)

    # torch.backends.cudnn.benchmark = True
    if torch.cuda.is_available() and not args.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )
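
`cfg_from_file` merges a YAML file into the global `cfg`. In the original py-faster-rcnn config module it is implemented roughly as follows (a sketch; `_merge_a_into_b` is that module's recursive key-wise merge):

import yaml
from easydict import EasyDict as edict

def cfg_from_file(filename):
    # Load a YAML config and merge it into the default options in cfg.
    with open(filename, 'r') as f:
        yaml_cfg = edict(yaml.safe_load(f))
    _merge_a_into_b(yaml_cfg, cfg)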
Example #3
import pprint

import numpy as np
import torch

from model.utils.config import cfg, cfg_from_file, cfg_from_list
from model.utils.parser_func import parse_args, set_dataset_args

try:
    xrange  # Python 2
except NameError:
    xrange = range  # Python 3

lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY

if __name__ == '__main__':

    args = parse_args()

    print('Called with args:')
    print(args)
    args = set_dataset_args(args, test=True)
    if torch.cuda.is_available() and not args.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )
    # Note: this seeds with the default RNG_SEED; cfg_from_file below may still override it.
    np.random.seed(cfg.RNG_SEED)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    cfg.TRAIN.USE_FLIPPED = False
Example #4
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, val_datasets, device,
                   net, optimizer, num_workers, lr, batch_size, start_epoch,
                   max_epochs, lr_decay_gamma, lr_decay_step, resume,
                   load_name, class_agnostic, debug, _run):

    args = Args(dataset=dataset_source,
                dataset_t="",
                cfg_file=cfg_file,
                net=net)
    args = set_dataset_args(args)

    args_val = Args(dataset=dataset_source,
                    dataset_t=val_datasets,
                    imdb_name_target=[],
                    cfg_file=cfg_file,
                    net=net)
    args_val = set_dataset_args(args_val, test=True)

    logger = LoggerForSacred(None, ex, True)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = (device == 'cuda')
    device = torch.device(device)

    output_dir = output_dir + "_{}".format(_run._id)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    dataloader_s, _, imdb, imdb_t = init_dataloaders_1s_1t(
        args, batch_size, num_workers)
    val_dataloader_ts, val_imdb_ts = init_val_dataloaders_mt(
        args_val, 1, num_workers)

    session = 1
    fasterRCNN, lr, optimizer, session, start_epoch, _ = init_frcnn_utils.init_non_damodel_optimizer(
        lr,
        class_agnostic,
        device,
        imdb,
        load_name,
        net,
        optimizer,
        resume,
        session,
        start_epoch,
        is_all_params=True)

    if torch.cuda.device_count() > 1:
        fasterRCNN = nn.DataParallel(fasterRCNN)

    iters_per_epoch = int(10000 / batch_size)

    total_step = 0
    for epoch in range(start_epoch, max_epochs + 1):
        # setting to train mode
        fasterRCNN.train()

        if epoch - 1 in lr_decay_step:
            adjust_learning_rate(optimizer, lr_decay_gamma)
            lr *= lr_decay_gamma

        total_step = frcnn_utils.train_no_da_frcnn_one_epoch(
            args, total_step, dataloader_s, iters_per_epoch, fasterRCNN,
            optimizer, device, logger)

        save_name = os.path.join(
            output_dir,
            'source_train_{}_session_{}_epoch_{}_total_step_{}.pth'.format(
                dataset_source, session, epoch, total_step))
        save_checkpoint(
            {
                'session': session,
                'epoch': epoch + 1,
                'model': fasterRCNN.module.state_dict()
                if torch.cuda.device_count() > 1 else fasterRCNN.state_dict(),
                'optimizer': optimizer.state_dict(),
                'pooling_mode': cfg.POOLING_MODE,
                'class_agnostic': class_agnostic,
            }, save_name)
    return 0
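
For reference, a checkpoint saved in this format can be restored along these lines (a sketch assuming the same model and optimizer objects as above):

checkpoint = torch.load(save_name, map_location=device)
fasterRCNN.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch']
cfg.POOLING_MODE = checkpoint['pooling_mode']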
Example #5
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, dataset_target,
                   val_datasets, device, net, optimizer, num_workers,
                   teacher_pth, student_pth, lr, batch_size, start_epoch,
                   max_epochs, lr_decay_gamma, lr_decay_step, resume,
                   load_name, imitation_loss_weight, eta, gamma, ef,
                   class_agnostic, lc, gc, LA_ATT, MID_ATT, debug, _run):

    args_val = Args(dataset=dataset_source,
                    dataset_t=val_datasets,
                    imdb_name_target=[],
                    cfg_file=cfg_file,
                    net=net)
    args_val = set_dataset_args(args_val, test=True)

    logger = LoggerForSacred(None, ex, False)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args_val.set_cfgs is not None:
        cfg_from_list(args_val.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = (device == 'cuda')
    device = torch.device(device)

    val_dataloader_ts, val_imdb_ts = init_frcnn_utils.init_val_dataloaders_mt(
        args_val, 1, num_workers)

    session = 1
    teacher = init_frcnn_utils.init_model_only(device,
                                               "res101",
                                               htcn_resnet,
                                               val_imdb_ts[0],
                                               teacher_pth,
                                               class_agnostic=class_agnostic,
                                               lc=lc,
                                               gc=gc,
                                               la_attention=LA_ATT,
                                               mid_attention=MID_ATT)
    fasterRCNN = init_frcnn_utils.init_model_only(
        device,
        "res50",
        htcn_resnet,
        val_imdb_ts[0],
        student_pth,
        class_agnostic=class_agnostic,
        lc=lc,
        gc=gc,
        la_attention=LA_ATT,
        mid_attention=MID_ATT)
    fasterRCNN_2 = init_frcnn_utils.init_model_only(
        device,
        "res50",
        htcn_resnet,
        val_imdb_ts[0],
        student_pth,
        class_agnostic=class_agnostic,
        lc=lc,
        gc=gc,
        la_attention=LA_ATT,
        mid_attention=MID_ATT)

    # Graft the teacher's RPN onto the student so proposals come from the teacher.
    fasterRCNN.RCNN_rpn = teacher.RCNN_rpn

    if torch.cuda.device_count() > 1:
        fasterRCNN = nn.DataParallel(fasterRCNN)

    total_step = 0
    best_ap = 0.
    if isinstance(val_datasets, list):
        avg_ap = 0
        for i, val_dataloader_t in enumerate(val_dataloader_ts):
            map = frcnn_utils.eval_one_dataloader(output_dir, val_dataloader_t,
                                                  fasterRCNN, device,
                                                  val_imdb_ts[i])
            logger.log_scalar(
                "student with teacher rpn map on {}".format(val_datasets[i]),
                map, 0)
            map = frcnn_utils.eval_one_dataloader(output_dir, val_dataloader_t,
                                                  teacher, device,
                                                  val_imdb_ts[i])
            logger.log_scalar(
                "teacher original map on {}".format(val_datasets[i]), map, 0)
            teacher.RCNN_rpn = fasterRCNN_2.RCNN_rpn
            map = frcnn_utils.eval_one_dataloader(output_dir, val_dataloader_t,
                                                  teacher, device,
                                                  val_imdb_ts[i])
            logger.log_scalar(
                "teacher with stu rpn map on {}".format(val_datasets[i]), map,
                0)
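
The `RCNN_rpn` swap above works because PyTorch submodules are plain attributes, so assignment shares parameters rather than copying them. A minimal illustration with a hypothetical toy module:

import torch.nn as nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.head = nn.Linear(4, 2)

a, b = Toy(), Toy()
a.head = b.head  # a now runs with b's head; weights are shared, not copied
assert a.head.weight is b.head.weight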
Example #6
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, dataset_target,
                   val_datasets, model_type, device, net, optimizer,
                   num_workers, model_pth, class_agnostic, lc, gc, LA_ATT,
                   MID_ATT, debug, _run):

    args_val = Args(dataset=dataset_source, dataset_t=val_datasets,
                    imdb_name_target=[], cfg_file=cfg_file, net=net)
    args_val = set_dataset_args(args_val, test=True)

    logger = LoggerForSacred(None, ex, True)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args_val.set_cfgs is not None:
        cfg_from_list(args_val.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = (device == 'cuda')
    device = torch.device(device)

    val_dataloader_ts, val_imdb_ts = init_frcnn_utils.init_val_dataloaders_mt(args_val, 1, num_workers)

    session = 1
    backbone_fn = htcn_resnet
    if 'res' in net:
        if model_type == 'normal':
            backbone_fn = n_resnet
        elif model_type == 'saitp':
            backbone_fn = s_resnet
    else:
        if model_type == 'normal':
            backbone_fn = n_vgg16
        elif model_type == 'htcn':
            backbone_fn = htcn_vgg16
        elif model_type == 'saitp':
            backbone_fn = None


    model = init_frcnn_utils.init_model_only(device, net, backbone_fn,
                                             val_imdb_ts[0], model_pth,
                                             class_agnostic=class_agnostic,
                                             lc=lc, gc=gc,
                                             la_attention=LA_ATT,
                                             mid_attention=MID_ATT)

    total_step = 0
    best_ap = 0.
    avg_ap = 0.
    avg_ap_per_class = {}
    if isinstance(val_datasets, list):
        for i, val_dataloader_t in enumerate(val_dataloader_ts):
            map, ap_per_class = frcnn_utils.eval_one_dataloader(output_dir, val_dataloader_t, model, device, val_imdb_ts[i], return_ap_class=True)
            logger.log_scalar(" map on {}".format(val_datasets[i]), map, 0)
            for cls, ap in ap_per_class.items():
                if cls in avg_ap_per_class:
                    avg_ap_per_class[cls] += ap
                else:
                    avg_ap_per_class[cls] = ap
            avg_ap += map
        avg_ap /= len(val_dataloader_ts)
        for cls, ap in avg_ap_per_class.items():
            ap /= len(val_dataloader_ts)
            logger.log_scalar(" map of class {}".format(cls), ap, 0)
    logger.log_scalar("avp map",avg_ap, 0)

    return avg_ap.item()
Example #7
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, device, net,
                   optimizer, num_workers, lr, batch_size, start_epoch,
                   max_epochs, lr_decay_gamma, lr_decay_step, resume,
                   load_name, pretrained, eta, gamma, ef, class_agnostic, lc,
                   gc, LA_ATT, MID_ATT, debug, _run):

    args = Args(dataset=dataset_source,
                dataset_t=[],
                imdb_name_target=[],
                cfg_file=cfg_file,
                net=net)
    args = set_dataset_args(args)

    is_bgr = False
    if net in ['res101', 'res50', 'res152', 'vgg16']:
        is_bgr = True

    logger = LoggerForSacred(None, ex)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = (device == 'cuda')
    device = torch.device(device)

    load_id = re.findall(r"\d+", load_name)[0]
    output_dir = output_dir + "_{}".format(load_id)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    dataloader_s, _, imdb, _ = init_dataloaders_1s_mt(args, batch_size,
                                                      num_workers, is_bgr)

    session = 1
    fasterRCNN = init_htcn_model(LA_ATT,
                                 MID_ATT,
                                 class_agnostic,
                                 device,
                                 gc,
                                 imdb,
                                 lc,
                                 load_name,
                                 net,
                                 strict=False,
                                 pretrained=pretrained)

    dtm = nn.Sequential(nn.Conv2d(3, 256, 1, stride=1, padding=0, bias=False),
                        nn.ReLU(), nn.Conv2d(256, 3, 1))

    dtm.to(device)
    optimizer = torch.optim.SGD(dtm.parameters(), lr=lr, momentum=0.9)

    if torch.cuda.device_count() > 1:
        fasterRCNN = nn.DataParallel(fasterRCNN)

    iters_per_epoch = int(10000 / batch_size)

    if ef:
        FL = EFocalLoss(class_num=2, gamma=gamma)
    else:
        FL = FocalLoss(class_num=2, gamma=gamma)

    dtm_util.get_mask_for_target(args, FL, 0, dataloader_s, iters_per_epoch,
                                 fasterRCNN, dtm, optimizer, device, logger)

    find_id = re.findall(r"\d+", load_name)
    if len(find_id) == 0:
        find_id = 0
    else:
        find_id = find_id[-1]
    torch.save(
        dtm,
        os.path.join(output_dir,
                     'dtm_target_cnn_{}_{}.p'.format(load_id, find_id)))

    return 0
Example #8
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, dataset_target,
                   val_datasets, device, net, optimizer, num_workers, lr,
                   batch_size, start_epoch, max_epochs, lr_decay_gamma,
                   lr_decay_step, mask_load_p, resume, load_name, pretrained,
                   model_type, eta, gamma, ef, class_agnostic, lc, gc, LA_ATT,
                   MID_ATT, debug, _run):

    args = Args(dataset=dataset_source,
                dataset_t=dataset_target,
                cfg_file=cfg_file,
                net=net)
    args = set_dataset_args(args)
    is_bgr = False
    if net in ['res101', 'res50', 'res152', 'vgg16']:
        is_bgr = True

    logger = LoggerForSacred(None, ex, False)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = (device == 'cuda')
    device = torch.device(device)

    backbone_fn = htcn_resnet
    if 'res' in net:
        if model_type == 'normal':
            backbone_fn = n_resnet
        elif model_type == 'saitp':
            backbone_fn = s_resnet
    else:
        if model_type == 'normal':
            backbone_fn = n_vgg16
        elif model_type == 'htcn':
            backbone_fn = htcn_vgg16
        elif model_type == 'saitp':
            backbone_fn = None
    dataloader_s, dataloader_t, imdb, imdb_t = init_dataloaders_1s_1t(
        args, batch_size, num_workers, is_bgr, False)
    model = init_frcnn_utils.init_model_only(device,
                                             net,
                                             backbone_fn,
                                             imdb_t,
                                             '',
                                             class_agnostic=class_agnostic,
                                             lc=lc,
                                             gc=gc,
                                             la_attention=LA_ATT,
                                             mid_attention=MID_ATT)
    model.eval()

    im_data = torch.randn(1, 3, 600, 1200).to(device)
    im_info = torch.FloatTensor([[600, 900, 2]]).to(device)
    gt_boxes = torch.zeros((1, 1, 5)).to(device)
    num_boxes = torch.zeros([1]).to(device)
    macs, params = profile(model,
                           inputs=(im_data, im_info, gt_boxes, num_boxes))
    macs, params = clever_format([macs, params], "%.3f")

    print("Model CFLOPS: {}".format(macs))
    print("Model Cparams: {}".format(params))

    random_mask = nn.Sequential(
        nn.Conv2d(3, 256, 1, stride=1, padding=0, bias=False), nn.ReLU(),
        nn.Conv2d(256, 3, 1)).to(device)

    macs, params = profile(random_mask, inputs=(im_data, ))
    macs, params = clever_format([macs, params], "%.3f")

    print("Mask CFLOPS: {}".format(macs))
    print("Mask Cparams: {}".format(params))

    iters_per_epoch = int(1000 / batch_size)
    data_iter_s = iter(dataloader_s)

    for step in range(1, iters_per_epoch + 1):
        try:
            data_s = next(data_iter_s)
        except StopIteration:
            # Restart the loader once the source iterator is exhausted.
            data_iter_s = iter(dataloader_s)
            data_s = next(data_iter_s)
        im_data = data_s[0].to(device)
        im_info = data_s[1].to(device)
        gt_boxes = data_s[2].to(device)
        num_boxes = data_s[3].to(device)
        # No forward pass here: the loop only exercises data loading and transfer.
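
`profile` and `clever_format` above come from the `thop` package. A self-contained sketch of the same measurement on a stock torchvision model (assuming `thop` and `torchvision` are installed):

import torch
from thop import profile, clever_format
from torchvision.models import resnet18

model = resnet18()
dummy = torch.randn(1, 3, 224, 224)
macs, params = profile(model, inputs=(dummy,))
macs, params = clever_format([macs, params], "%.3f")
print(macs, params)  # roughly 1.8G MACs and 11.7M parameters
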
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, dataset_target,
                   val_datasets, device, net, optimizer, num_workers, lr,
                   batch_size, start_epoch, max_epochs, lr_decay_gamma,
                   lr_decay_step, resume, load_name, pretrained, eta, gamma,
                   ef, class_agnostic, lc, gc, LA_ATT, MID_ATT, debug, _run):

    args = Args(dataset=dataset_source, dataset_t=dataset_target,
                imdb_name_target=[], cfg_file=cfg_file, net=net)
    args = set_dataset_args(args)

    args_val = Args(dataset=dataset_source, dataset_t=val_datasets,
                    imdb_name_target=[], cfg_file=cfg_file, net=net)
    args_val = set_dataset_args(args_val, test=True)

    is_bgr = False
    if net in ['res101', 'res50', 'res152', 'vgg16']:
        is_bgr = True

    logger = LoggerForSacred(None, ex, True)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = (device == 'cuda')
    device = torch.device(device)

    output_dir = output_dir + "_{}".format(_run._id)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    dataloader_s, m_dataloader_t, imdb, m_imdb_t = init_dataloaders_1s_mt(
        args, batch_size, num_workers, is_bgr)
    val_dataloader_ts, val_imdb_ts = init_val_dataloaders_mt(
        args_val, 1, num_workers, is_bgr)

    session = 1
    fasterRCNN = init_htcn_model(LA_ATT, MID_ATT, class_agnostic, device, gc,
                                 imdb, lc, load_name, net, pretrained=pretrained)
    # fasterRCNN.re_init_da_layers(device)
    lr, optimizer, session, start_epoch = init_optimizer(
        lr, fasterRCNN, optimizer, resume, load_name, session, start_epoch,
        is_all_params=True)
    # _, optimizer_unsup, _, _ = init_optimizer(lr, fasterRCNN, optimizer, resume,
    #                                           load_name, session, start_epoch,
    #                                           is_all_params=True)

    if torch.cuda.device_count() > 1:
        fasterRCNN = nn.DataParallel(fasterRCNN)

    iters_per_epoch = int(10000 / batch_size)

    if ef:
        FL = EFocalLoss(class_num=2, gamma=gamma)
    else:
        FL = FocalLoss(class_num=2, gamma=gamma)

    total_step = 0
    if resume:
        total_step = (start_epoch - 1) * 10000

    for epoch in range(start_epoch, max_epochs + 1):
        # setting to train mode
        fasterRCNN.train()

        if epoch - 1 in lr_decay_step:
            adjust_learning_rate(optimizer, lr_decay_gamma)
            lr *= lr_decay_gamma

        total_step = inc_frcnn_utils.train_htcn_one_epoch_inc_union(
            args, FL, total_step, dataloader_s, m_dataloader_t,
            iters_per_epoch, fasterRCNN, optimizer, device, eta, logger)

        save_name = os.path.join(
            output_dir,
            'target_{}_eta_{}_local_{}_global_{}_gamma_{}_session_{}_epoch_{}_total_step_{}.pth'.format(
                args.dataset_t, eta, lc, gc, gamma, session, epoch,
                total_step))
        save_checkpoint({
            'session': session,
            'epoch': epoch + 1,
            'model': fasterRCNN.module.state_dict() if torch.cuda.device_count() > 1 else fasterRCNN.state_dict(),
            'optimizer': optimizer.state_dict(),
            'pooling_mode': cfg.POOLING_MODE,
            'class_agnostic': class_agnostic,
        }, save_name)
    return 0