# Example #1
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, dataset_target,
                   val_datasets, device, net, optimizer, num_workers, lr,
                   batch_size, start_epoch, max_epochs, lr_decay_gamma,
                   lr_decay_step, mask_load_p, resume, load_name, pretrained,
                   model_type, eta, gamma, ef, class_agnostic, lc, gc, LA_ATT,
                   MID_ATT, debug, _run):
    """Profile MACs/params of a detector and a small conv "mask" network.

    Builds the backbone selected by (``net``, ``model_type``), runs
    ``thop.profile`` on a synthetic 600x1200 input, prints the counts for
    both the detection model and a 3->256->3 conv head, then drains one
    pseudo-epoch of the source dataloader (data loading only — no
    training step is performed).

    Returns:
        None. Results are printed to stdout.
    """
    args = Args(dataset=dataset_source,
                dataset_t=dataset_target,
                cfg_file=cfg_file,
                net=net)
    args = set_dataset_args(args)

    # Caffe-style pretrained backbones expect BGR channel ordering.
    is_bgr = net in ('res101', 'res50', 'res152', 'vgg16')

    logger = LoggerForSacred(None, ex, False)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = device == 'cuda'
    device = torch.device(device)

    # Select the backbone constructor from the (net family, model_type) pair.
    backbone_fn = htcn_resnet
    if 'res' in net:
        if model_type == 'normal':
            backbone_fn = n_resnet
        elif model_type == 'saitp':
            backbone_fn = s_resnet
    else:
        if model_type == 'normal':
            backbone_fn = n_vgg16
        elif model_type == 'htcn':
            backbone_fn = htcn_vgg16
        elif model_type == 'saitp':
            # NOTE(review): no saitp VGG16 backbone is wired up here, so
            # init_model_only receives None — confirm this combination is
            # never requested.
            backbone_fn = None

    dataloader_s, dataloader_t, imdb, imdb_t = init_dataloaders_1s_1t(
        args, batch_size, num_workers, is_bgr, False)
    model = init_frcnn_utils.init_model_only(device,
                                             net,
                                             backbone_fn,
                                             imdb_t,
                                             '',
                                             class_agnostic=class_agnostic,
                                             lc=lc,
                                             gc=gc,
                                             la_attention=LA_ATT,
                                             mid_attention=MID_ATT)
    model.eval()

    # Synthetic Faster-RCNN-style inputs used only for FLOP counting.
    im_data = torch.randn(1, 3, 600, 1200).to(device)
    im_info = torch.FloatTensor([[600, 900, 2]]).to(device)
    gt_boxes = torch.zeros((1, 1, 5)).to(device)
    num_boxes = torch.zeros([1]).to(device)
    macs, params = profile(model,
                           inputs=(im_data, im_info, gt_boxes, num_boxes))
    macs, params = clever_format([macs, params], "%.3f")

    print("Model CFLOPS: {}".format(macs))
    print("Model Cparams: {}".format(params))

    # Small 3->256->3 conv network, profiled for comparison with the mask
    # branch cost.
    random_mask = nn.Sequential(
        nn.Conv2d(3, 256, 1, stride=1, padding=0, bias=False), nn.ReLU(),
        nn.Conv2d(256, 3, 1)).to(device)

    macs, params = profile(random_mask, inputs=(im_data, ))
    macs, params = clever_format([macs, params], "%.3f")

    print("Mask CFLOPS: {}".format(macs))
    print("Mask Cparams: {}".format(params))

    iters_per_epoch = int(1000 / batch_size)
    data_iter_s = iter(dataloader_s)

    for step in range(1, iters_per_epoch + 1):
        try:
            data_s = next(data_iter_s)
        except StopIteration:
            # Loader exhausted — restart it and keep going. (Was a bare
            # `except:`, which also swallowed KeyboardInterrupt and real
            # loader errors.)
            data_iter_s = iter(dataloader_s)
            data_s = next(data_iter_s)
        # Move one batch to the device; no training is performed here.
        im_data = data_s[0].to(device)
        im_info = data_s[1].to(device)
        gt_boxes = data_s[2].to(device)
        num_boxes = data_s[3].to(device)
# Example #2
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, val_datasets, device,
                   net, optimizer, num_workers, lr, batch_size, start_epoch,
                   max_epochs, lr_decay_gamma, lr_decay_step, resume,
                   load_name, class_agnostic, debug, _run):
    """Train a plain (non-domain-adaptive) Faster-RCNN on the source dataset.

    Builds source train / multi-target validation dataloaders, initializes
    the model and optimizer (optionally resuming from ``load_name``), then
    trains for ``max_epochs`` epochs, decaying the learning rate at the
    configured steps and writing a checkpoint after every epoch.

    Returns:
        int: always 0 (sacred run result).
    """
    args = set_dataset_args(
        Args(dataset=dataset_source, dataset_t="", cfg_file=cfg_file, net=net))

    args_val = set_dataset_args(Args(dataset=dataset_source,
                                     dataset_t=val_datasets,
                                     imdb_name_target=[],
                                     cfg_file=cfg_file,
                                     net=net),
                                test=True)

    logger = LoggerForSacred(None, ex, True)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = device == 'cuda'
    device = torch.device(device)

    # Make the output directory unique per sacred run.
    output_dir = f"{output_dir}_{_run._id}"
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    dataloader_s, _, imdb, imdb_t = init_dataloaders_1s_1t(
        args, batch_size, num_workers)
    val_dataloader_ts, val_imdb_ts = init_val_dataloaders_mt(
        args_val, 1, num_workers)

    session = 1
    fasterRCNN, lr, optimizer, session, start_epoch, _ = init_frcnn_utils.init_non_damodel_optimizer(
        lr,
        class_agnostic,
        device,
        imdb,
        load_name,
        net,
        optimizer,
        resume,
        session,
        start_epoch,
        is_all_params=True)

    if torch.cuda.device_count() > 1:
        fasterRCNN = nn.DataParallel(fasterRCNN)

    iters_per_epoch = int(10000 / batch_size)

    total_step = 0
    for epoch in range(start_epoch, max_epochs + 1):
        fasterRCNN.train()

        # Decay the learning rate at the configured epoch boundaries.
        if epoch - 1 in lr_decay_step:
            adjust_learning_rate(optimizer, lr_decay_gamma)
            lr *= lr_decay_gamma

        total_step = frcnn_utils.train_no_da_frcnn_one_epoch(
            args, total_step, dataloader_s, iters_per_epoch, fasterRCNN,
            optimizer, device, logger)

        # Checkpoint after every epoch; unwrap DataParallel when present.
        on_multi_gpu = torch.cuda.device_count() > 1
        model_state = (fasterRCNN.module.state_dict()
                       if on_multi_gpu else fasterRCNN.state_dict())
        checkpoint_path = os.path.join(
            output_dir,
            f'source_train_{dataset_source}_session_{session}_epoch_{epoch}_total_step_{total_step}.pth'
        )
        save_checkpoint(
            {
                'session': session,
                'epoch': epoch + 1,
                'model': model_state,
                'optimizer': optimizer.state_dict(),
                'pooling_mode': cfg.POOLING_MODE,
                'class_agnostic': class_agnostic,
            }, checkpoint_path)
    return 0
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, dataset_target,
                   val_datasets, device, net, optimizer, num_workers, lr,
                   batch_size, start_epoch, max_epochs, lr_decay_gamma,
                   lr_decay_step, dtm_load_p, resume, load_name, pretrained,
                   eta, gamma, ef, class_agnostic, lc, gc, LA_ATT, MID_ATT,
                   alpha, debug, _run):
    """Train an HTCN model with domain adaptation using a pre-trained DTM.

    Loads the DTM module from ``dtm_load_p``, builds source/target
    dataloaders, initializes the HTCN detector and its optimizer
    (optionally resuming), then trains with focal loss (``EFocalLoss`` when
    ``ef`` is set) for ``max_epochs`` epochs, checkpointing every epoch.
    """
    args = Args(dataset=dataset_source,
                dataset_t=dataset_target,
                cfg_file=cfg_file,
                net=net)
    args = set_dataset_args(args)

    args_val = Args(dataset=dataset_source,
                    dataset_t=val_datasets,
                    imdb_name_target=[],
                    cfg_file=cfg_file,
                    net=net)
    args_val = set_dataset_args(args_val, test=True)

    # Caffe-style pretrained backbones expect BGR channel ordering.
    is_bgr = net in ('res101', 'res50', 'res152', 'vgg16')

    logger = LoggerForSacred(None, ex, True)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = device == 'cuda'
    device = torch.device(device)

    # Make the output directory unique per sacred run.
    output_dir = output_dir + "_{}".format(_run._id)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    dataloader_s, dataloader_t, imdb, imdb_t = init_dataloaders_1s_1t(
        args, batch_size, num_workers, is_bgr)

    session = 1
    fasterRCNN = init_htcn_model(LA_ATT,
                                 MID_ATT,
                                 class_agnostic,
                                 device,
                                 gc,
                                 imdb,
                                 lc,
                                 load_name,
                                 net,
                                 strict=False,
                                 pretrained=pretrained)
    lr, optimizer, session, start_epoch = init_optimizer(lr,
                                                         fasterRCNN,
                                                         optimizer,
                                                         resume,
                                                         load_name,
                                                         session,
                                                         start_epoch,
                                                         is_all_params=True)

    # SECURITY NOTE: torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted paths.
    dtm = torch.load(dtm_load_p)
    dtm = dtm.to(device)
    dtm_dict = {'dtm': dtm}

    if torch.cuda.device_count() > 1:
        fasterRCNN = nn.DataParallel(fasterRCNN)

    iters_per_epoch = int(10000 / batch_size)

    # Focal loss over the 2-class domain classifier; 'ef' selects the
    # exponential variant.
    if ef:
        FL = EFocalLoss(class_num=2, gamma=gamma)
    else:
        FL = FocalLoss(class_num=2, gamma=gamma)

    total_step = 0

    for epoch in range(start_epoch, max_epochs + 1):
        fasterRCNN.train()

        # Decay the learning rate at the configured epoch boundaries.
        if epoch - 1 in lr_decay_step:
            adjust_learning_rate(optimizer, lr_decay_gamma)
            lr *= lr_decay_gamma

        total_step = dtm_util.train_htcn_one_epoch_ida_with_dtm(
            args, FL, total_step, dataloader_s, dtm_dict, dataloader_t,
            iters_per_epoch, fasterRCNN, optimizer, device, eta, alpha, logger)

        # BUGFIX: original formatted with `args.eta`, but `Args` is built
        # without an `eta` attribute — use the `eta` parameter instead.
        # (Confirm set_dataset_args does not inject `eta`.)
        save_name = os.path.join(
            output_dir,
            'target_{}_eta_{}_local_{}_global_{}_gamma_{}_session_{}_epoch_{}_total_step_{}.pth'
            .format(args.dataset_t, eta, lc, gc, gamma, session, epoch,
                    total_step))
        save_checkpoint(
            {
                'session': session,
                'epoch': epoch + 1,
                'model': fasterRCNN.module.state_dict()
                if torch.cuda.device_count() > 1 else fasterRCNN.state_dict(),
                'optimizer': optimizer.state_dict(),
                'pooling_mode': cfg.POOLING_MODE,
                'class_agnostic': class_agnostic,
            }, save_name)