Example #1
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, val_datasets, device,
                   net, optimizer, num_workers, lr, batch_size, start_epoch,
                   max_epochs, lr_decay_gamma, lr_decay_step, resume,
                   load_name, class_agnostic, debug, _run):

    args = Args(dataset=dataset_source,
                dataset_t="",
                cfg_file=cfg_file,
                net=net)
    args = set_dataset_args(args)

    args_val = Args(dataset=dataset_source,
                    dataset_t=val_datasets,
                    imdb_name_target=[],
                    cfg_file=cfg_file,
                    net=net)
    args_val = set_dataset_args(args_val, test=True)

    logger = LoggerForSacred(None, ex, True)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = True if device == 'cuda' else False
    device = torch.device(device)

    output_dir = output_dir + "_{}".format(_run._id)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    dataloader_s, _, imdb, imdb_t = init_dataloaders_1s_1t(
        args, batch_size, num_workers)
    val_dataloader_ts, val_imdb_ts = init_val_dataloaders_mt(
        args_val, 1, num_workers)

    session = 1
    fasterRCNN, lr, optimizer, session, start_epoch, _ = \
        init_frcnn_utils.init_non_damodel_optimizer(
            lr, class_agnostic, device, imdb, load_name, net, optimizer,
            resume, session, start_epoch, is_all_params=True)

    if torch.cuda.device_count() > 1:
        fasterRCNN = nn.DataParallel(fasterRCNN)

    # fixed per-epoch budget: 10,000 samples regardless of dataset size
    iters_per_epoch = int(10000 / batch_size)

    total_step = 0
    for epoch in range(start_epoch, max_epochs + 1):
        # setting to train mode
        fasterRCNN.train()

        if epoch - 1 in lr_decay_step:
            adjust_learning_rate(optimizer, lr_decay_gamma)
            lr *= lr_decay_gamma

        total_step = frcnn_utils.train_no_da_frcnn_one_epoch(
            args, total_step, dataloader_s, iters_per_epoch, fasterRCNN,
            optimizer, device, logger)

        save_name = os.path.join(
            output_dir,
            'source_train_{}_session_{}_epoch_{}_total_step_{}.pth'.format(
                dataset_source, session, epoch, total_step))
        save_checkpoint({
            'session': session,
            'epoch': epoch + 1,
            'model': fasterRCNN.module.state_dict()
                     if torch.cuda.device_count() > 1 else fasterRCNN.state_dict(),
            'optimizer': optimizer.state_dict(),
            'pooling_mode': cfg.POOLING_MODE,
            'class_agnostic': class_agnostic,
        }, save_name)
    return 0
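
The `_run` argument, the module-level `ex`, and LoggerForSacred all point to the Sacred experiment framework: each of these functions is registered on a sacred.Experiment and launched from the command line, with its arguments injected from the captured config. A minimal sketch of that wiring, assuming Sacred; the experiment name and every config value below are illustrative placeholders, not the project's defaults:

# Hedged sketch: registering a function like exp_htcn_mixed with Sacred.
# All names and values here are placeholders for illustration.
from sacred import Experiment

ex = Experiment("frcnn_source_only")

@ex.config
def defaults():
    cfg_file = "cfgs/res101.yml"        # hypothetical config path
    output_dir = "./snapshots"
    dataset_source = "cityscapes"       # placeholder dataset key
    val_datasets = ["foggy_cityscapes"]
    device = "cuda"
    net = "res101"
    optimizer = "sgd"
    num_workers = 4
    lr = 1e-3
    batch_size = 1
    start_epoch = 1
    max_epochs = 7
    lr_decay_gamma = 0.1
    lr_decay_step = [5]
    resume = False
    load_name = ""
    class_agnostic = False
    debug = False

@ex.automain
def main(_run, _config):
    # Sacred injects the captured config (plus an auto-generated 'seed' key,
    # which exp_htcn_mixed does not accept, so it is filtered out here).
    kwargs = {k: v for k, v in _config.items() if k != "seed"}
    return exp_htcn_mixed(**kwargs, _run=_run)
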
Example #2
File: exp_eval.py Project: Natlem/M-HTCN
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, dataset_target, val_datasets,
                    model_type, device, net, optimizer, num_workers, model_pth, class_agnostic, lc, gc, LA_ATT, MID_ATT,
                    debug, _run):

    args_val = Args(dataset=dataset_source, dataset_t=val_datasets, imdb_name_target=[], cfg_file=cfg_file, net=net)
    args_val = set_dataset_args(args_val, test=True)


    logger = LoggerForSacred(None, ex, True)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args_val.set_cfgs is not None:
        cfg_from_list(args_val.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = True if device == 'cuda' else False
    device = torch.device(device)

    val_dataloader_ts, val_imdb_ts = init_frcnn_utils.init_val_dataloaders_mt(args_val, 1, num_workers)

    session = 1
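    # Select the backbone constructor from (net, model_type); HTCN ResNet is
    # the default, and note that ('vgg16', 'saitp') leaves backbone_fn as None.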
    backbone_fn = htcn_resnet
    if 'res' in net:
        if model_type == 'normal':
            backbone_fn = n_resnet
        elif model_type == 'saitp':
            backbone_fn = s_resnet
    else:
        if model_type == 'normal':
            backbone_fn = n_vgg16
        elif model_type == 'htcn':
            backbone_fn = htcn_vgg16
        elif model_type == 'saitp':
            backbone_fn = None


    model = init_frcnn_utils.init_model_only(
        device, net, backbone_fn, val_imdb_ts[0], model_pth,
        class_agnostic=class_agnostic, lc=lc, gc=gc,
        la_attention=LA_ATT, mid_attention=MID_ATT)

    total_step = 0
    best_ap = 0.
    avg_ap = 0.
    avg_ap_per_class = {}
    if isinstance(val_datasets, list):
        for i, val_dataloader_t in enumerate(val_dataloader_ts):
            map, ap_per_class = frcnn_utils.eval_one_dataloader(output_dir, val_dataloader_t, model, device, val_imdb_ts[i], return_ap_class=True)
            logger.log_scalar(" map on {}".format(val_datasets[i]), map, 0)
            for cls, ap in ap_per_class.items():
                if cls in avg_ap_per_class:
                    avg_ap_per_class[cls] += ap
                else:
                    avg_ap_per_class[cls] = ap
            avg_ap += map
        avg_ap /= len(val_dataloader_ts)
        for cls, ap in avg_ap_per_class.items():
            ap /= len(val_dataloader_ts)
            logger.log_scalar(" map of class {}".format(cls), ap, 0)
    logger.log_scalar("avp map",avg_ap, 0)

    return avg_ap.item()
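
One subtlety in the loop above: dividing the loop variable `ap` never writes back into avg_ap_per_class, which is harmless here only because the averaged value is logged immediately. The same per-class macro-averaging, factored into a dependency-free helper (names are illustrative, not part of the project):

# Standalone sketch of the macro-averaging performed above: mean AP per class
# across several validation datasets. Pure Python, no project dependencies.
def macro_average_ap(ap_dicts):
    """ap_dicts: one {class_name: ap} dict per validation dataset."""
    totals = {}
    for ap_per_class in ap_dicts:
        for cls, ap in ap_per_class.items():
            totals[cls] = totals.get(cls, 0.0) + ap
    n = len(ap_dicts)
    return {cls: total / n for cls, total in totals.items()}

# Example: two datasets, two classes.
print(macro_average_ap([{"car": 0.5, "bus": 0.3}, {"car": 0.7, "bus": 0.5}]))
# -> {'car': 0.6, 'bus': 0.4}

Like the original loop, a class that appears in only some datasets is still divided by the total dataset count.
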
Example #3
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, dataset_target,
                   val_datasets, device, net, optimizer, num_workers,
                   teacher_pth, student_pth, lr, batch_size, start_epoch,
                   max_epochs, lr_decay_gamma, lr_decay_step, resume,
                   load_name, imitation_loss_weight, eta, gamma, ef,
                   class_agnostic, lc, gc, LA_ATT, MID_ATT, debug, _run):

    args_val = Args(dataset=dataset_source,
                    dataset_t=val_datasets,
                    imdb_name_target=[],
                    cfg_file=cfg_file,
                    net=net)
    args_val = set_dataset_args(args_val, test=True)

    logger = LoggerForSacred(None, ex, False)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args_val.set_cfgs is not None:
        cfg_from_list(args_val.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = True if device == 'cuda' else False
    device = torch.device(device)

    val_dataloader_ts, val_imdb_ts = init_frcnn_utils.init_val_dataloaders_mt(
        args_val, 1, num_workers)

    session = 1
    teacher = init_frcnn_utils.init_model_only(
        device, "res101", htcn_resnet, val_imdb_ts[0], teacher_pth,
        class_agnostic=class_agnostic, lc=lc, gc=gc,
        la_attention=LA_ATT, mid_attention=MID_ATT)
    fasterRCNN = init_frcnn_utils.init_model_only(
        device, "res50", htcn_resnet, val_imdb_ts[0], student_pth,
        class_agnostic=class_agnostic, lc=lc, gc=gc,
        la_attention=LA_ATT, mid_attention=MID_ATT)
    fasterRCNN_2 = init_frcnn_utils.init_model_only(
        device, "res50", htcn_resnet, val_imdb_ts[0], student_pth,
        class_agnostic=class_agnostic, lc=lc, gc=gc,
        la_attention=LA_ATT, mid_attention=MID_ATT)

    # Graft the teacher's RPN onto the student to measure how transferable
    # the RPN is (see the logged metric names below).
    fasterRCNN.RCNN_rpn = teacher.RCNN_rpn

    if torch.cuda.device_count() > 1:
        fasterRCNN = nn.DataParallel(fasterRCNN)

    total_step = 0
    best_ap = 0.
    if isinstance(val_datasets, list):
        avg_ap = 0
        for i, val_dataloader_t in enumerate(val_dataloader_ts):
            map = frcnn_utils.eval_one_dataloader(output_dir, val_dataloader_t,
                                                  fasterRCNN, device,
                                                  val_imdb_ts[i])
            logger.log_scalar(
                "student with teacher rpn map on {}".format(val_datasets[i]),
                map, 0)
            map = frcnn_utils.eval_one_dataloader(output_dir, val_dataloader_t,
                                                  teacher, device,
                                                  val_imdb_ts[i])
            logger.log_scalar(
                "teacher original map on {}".format(val_datasets[i]), map, 0)
            teacher.RCNN_rpn = fasterRCNN_2.RCNN_rpn
            map = frcnn_utils.eval_one_dataloader(output_dir, val_dataloader_t,
                                                  teacher, device,
                                                  val_imdb_ts[i])
            logger.log_scalar(
                "teacher with stu rpn map on {}".format(val_datasets[i]), map,
                0)
Example #4
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, dataset_target, val_datasets,
                    device, net, optimizer, num_workers,
                    lr, batch_size, start_epoch, max_epochs, lr_decay_gamma, lr_decay_step,
                    resume, load_name, pretrained,
                    eta, gamma, ef, class_agnostic, lc, gc, LA_ATT, MID_ATT,
                    debug, _run):

    args = Args(dataset=dataset_source, dataset_t=dataset_target, imdb_name_target=[], cfg_file=cfg_file, net=net)
    args = set_dataset_args(args)

    args_val = Args(dataset=dataset_source, dataset_t=val_datasets, imdb_name_target=[], cfg_file=cfg_file, net=net)
    args_val = set_dataset_args(args_val, test=True)

    is_bgr = False
    if net in ['res101', 'res50', 'res152', 'vgg16']:
        is_bgr = True


    logger = LoggerForSacred(None, ex, True)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = True if device == 'cuda' else False
    device = torch.device(device)

    output_dir = output_dir + "_{}".format(_run._id)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    dataloader_s, m_dataloader_t, imdb, m_imdb_t = init_dataloaders_1s_mt(args, batch_size, num_workers, is_bgr)
    val_dataloader_ts, val_imdb_ts = init_val_dataloaders_mt(args_val, 1, num_workers, is_bgr)

    session = 1
    fasterRCNN = init_htcn_model(LA_ATT, MID_ATT, class_agnostic, device, gc, imdb, lc, load_name, net, pretrained=pretrained)
    #fasterRCNN.re_init_da_layers(device)
    lr, optimizer, session, start_epoch = init_optimizer(lr, fasterRCNN, optimizer, resume, load_name, session, start_epoch, is_all_params=True)
    # _, optimizer_unsup, _, _ = init_optimizer(lr, fasterRCNN, optimizer, resume, load_name, session,
    #                                                      start_epoch, is_all_params=True)


    if torch.cuda.device_count() > 1:
        fasterRCNN = nn.DataParallel(fasterRCNN)

    iters_per_epoch = int(10000 / batch_size)

    # Pick the focal-loss variant applied to the two-class (domain) predictions.
    if ef:
        FL = EFocalLoss(class_num=2, gamma=gamma)
    else:
        FL = FocalLoss(class_num=2, gamma=gamma)

    total_step = 0
    if resume:
        # resuming assumes the fixed 10,000-iteration epoch length used above
        total_step = (start_epoch - 1) * 10000

    for epoch in range(start_epoch, max_epochs + 1):
        # setting to train mode
        fasterRCNN.train()

        if epoch - 1 in lr_decay_step:
            adjust_learning_rate(optimizer, lr_decay_gamma)
            lr *= lr_decay_gamma

        total_step = inc_frcnn_utils.train_htcn_one_epoch_inc_union(args, FL, total_step, dataloader_s, m_dataloader_t, iters_per_epoch, fasterRCNN, optimizer, device, eta, logger)

        save_name = os.path.join(output_dir,
                                 'target_{}_eta_{}_local_{}_global_{}_gamma_{}_session_{}_epoch_{}_total_step_{}.pth'.format(
                                     args.dataset_t, eta,
                                     lc, gc, gamma,
                                     session, epoch,
                                     total_step))
        save_checkpoint({
            'session': session,
            'epoch': epoch + 1,
            'model': fasterRCNN.module.state_dict() if torch.cuda.device_count() > 1 else fasterRCNN.state_dict(),
            'optimizer': optimizer.state_dict(),
            'pooling_mode': cfg.POOLING_MODE,
            'class_agnostic': class_agnostic,
        }, save_name)
    return 0
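
For reference, in the Faster R-CNN PyTorch codebases this training loop follows, adjust_learning_rate is typically a uniform rescale of every optimizer parameter group; a sketch of that common implementation (the project's own helper may differ):

# Common implementation of the adjust_learning_rate helper called above
# (sketch; the project's version may differ in details).
def adjust_learning_rate(optimizer, decay=0.1):
    # Scale every parameter group's learning rate by the decay factor.
    for param_group in optimizer.param_groups:
        param_group['lr'] = decay * param_group['lr']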