Example #1
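HTCN-style domain-adaptive training as a Sacred experiment: build source and target dataloaders, initialize the detector and optimizer, then run per-epoch adversarial training with learning-rate decay and checkpointing.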
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, dataset_target,
                   val_datasets, device, net, optimizer, num_workers, lr,
                   batch_size, start_epoch, max_epochs, lr_decay_gamma,
                   lr_decay_step, resume, load_name, pretrained, eta, gamma,
                   ef, class_agnostic, lc, gc, LA_ATT, MID_ATT, debug, _run):

    args = Args(dataset=dataset_source,
                dataset_t=dataset_target,
                cfg_file=cfg_file,
                net=net)
    args = set_dataset_args(args)

    args_val = Args(dataset=dataset_source,
                    dataset_t=val_datasets,
                    imdb_name_target=[],
                    cfg_file=cfg_file,
                    net=net)
    args_val = set_dataset_args(args_val, test=True)

    is_bgr = net in ['res101', 'res50', 'res152', 'vgg16']

    logger = LoggerForSacred(None, ex, True)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = device == 'cuda'
    device = torch.device(device)

    output_dir = output_dir + "_{}".format(_run._id)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    dataloader_s, dataloader_t, imdb, imdb_t = init_dataloaders_1s_1t(
        args, batch_size, num_workers, is_bgr)

    session = 1
    fasterRCNN, lr, optimizer, session, start_epoch, _ = init_htcn_model_optimizer(
        lr,
        LA_ATT,
        MID_ATT,
        class_agnostic,
        device,
        gc,
        imdb,
        lc,
        load_name,
        net,
        optimizer,
        resume,
        session,
        start_epoch,
        pretrained=pretrained,
        is_all_params=False)

    if torch.cuda.device_count() > 1:
        fasterRCNN = nn.DataParallel(fasterRCNN)

    iters_per_epoch = int(10000 / batch_size)  # fixed budget of 10,000 images per epoch

    if ef:
        FL = EFocalLoss(class_num=2, gamma=gamma)
    else:
        FL = FocalLoss(class_num=2, gamma=gamma)

    total_step = 0
    for epoch in range(start_epoch, max_epochs + 1):
        # setting to train mode
        fasterRCNN.train()

        if epoch - 1 in lr_decay_step:
            adjust_learning_rate(optimizer, lr_decay_gamma)
            lr *= lr_decay_gamma

        total_step = frcnn_utils.train_htcn_one_epoch(
            args, FL, total_step, dataloader_s, dataloader_t, iters_per_epoch,
            fasterRCNN, optimizer, device, eta, logger)
        save_name = os.path.join(
            output_dir,
            'target_{}_eta_{}_local_{}_global_{}_gamma_{}_session_{}_epoch_{}_total_step_{}.pth'
            .format(args.dataset_t, eta, lc, gc, gamma, session, epoch,
                    total_step))
        save_checkpoint(
            {
                'session': session,
                'epoch': epoch + 1,
                'model': fasterRCNN.module.state_dict()
                if torch.cuda.device_count() > 1 else fasterRCNN.state_dict(),
                'optimizer': optimizer.state_dict(),
                'pooling_mode': cfg.POOLING_MODE,
                'class_agnostic': class_agnostic,
            }, save_name)
    return 0
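
The helpers adjust_learning_rate and save_checkpoint are not defined in this snippet. Minimal sketches consistent with how they are called above (the codebase's own versions may differ):

def adjust_learning_rate(optimizer, decay=0.1):
    # Scale the learning rate of every parameter group in place.
    for param_group in optimizer.param_groups:
        param_group['lr'] = decay * param_group['lr']


def save_checkpoint(state, filename):
    # Serialize the checkpoint dict (weights, optimizer state, metadata).
    torch.save(state, filename)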
Example #2
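Excerpt from a plain training script: resume model and optimizer state from a checkpoint, optionally wrap the model for multi-GPU training, choose the focal-loss variant, and enter the epoch loop.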
    if args.resume:
        checkpoint = torch.load(args.load_name)
        args.session = checkpoint['session']
        args.start_epoch = checkpoint['epoch']
        fasterRCNN.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr = optimizer.param_groups[0]['lr']
        if 'pooling_mode' in checkpoint.keys():
            cfg.POOLING_MODE = checkpoint['pooling_mode']
        print("loaded checkpoint %s" % (args.load_name))

    if args.mGPUs:
        fasterRCNN = nn.DataParallel(fasterRCNN)
    iters_per_epoch = int(10000 / args.batch_size)
    if args.ef:
        FL = EFocalLoss(class_num=2, gamma=args.gamma)
    else:
        FL = FocalLoss(class_num=2, gamma=args.gamma)

    if args.use_tfboard:
        from tensorboardX import SummaryWriter

        logger = SummaryWriter("logs")
    count_iter = 0
    for epoch in range(args.start_epoch, args.max_epochs + 1):
        # setting to train mode
        fasterRCNN.train()
        loss_temp = 0
        start = time.time()
        # Decay every (args.lr_decay_step + 1) epochs; here lr_decay_step is a
        # step count, not a list of epochs as in Example #1.
        if epoch % (args.lr_decay_step + 1) == 0:
            adjust_learning_rate(optimizer, args.lr_decay_gamma)
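
FocalLoss and EFocalLoss are imported elsewhere in this codebase. A minimal sketch of the standard focal loss, FL(p_t) = -(1 - p_t)**gamma * log(p_t) (Lin et al., 2017), assuming raw logits and integer class targets; the actual implementation may differ in weighting and reduction:

import torch.nn as nn
import torch.nn.functional as F


class FocalLoss(nn.Module):
    def __init__(self, class_num=2, gamma=2.0):
        super().__init__()
        self.class_num = class_num
        self.gamma = gamma

    def forward(self, inputs, targets):
        # inputs: (N, class_num) logits; targets: (N,) class indices.
        log_p = F.log_softmax(inputs, dim=1)
        log_pt = log_p.gather(1, targets.unsqueeze(1)).squeeze(1)
        pt = log_pt.exp()
        # Down-weight well-classified examples by (1 - p_t)**gamma.
        loss = -((1.0 - pt) ** self.gamma) * log_pt
        return loss.mean()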
Example #3
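A variant that loads a pretrained HTCN detector and trains only a small convolutional transfer module (dtm) on the source dataloader (only dtm's parameters are passed to the optimizer), then saves the learned module to disk.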
def exp_htcn_mixed(cfg_file, output_dir, dataset_source, device, net,
                   optimizer, num_workers, lr, batch_size, start_epoch,
                   max_epochs, lr_decay_gamma, lr_decay_step, resume,
                   load_name, pretrained, eta, gamma, ef, class_agnostic, lc,
                   gc, LA_ATT, MID_ATT, debug, _run):

    args = Args(dataset=dataset_source,
                dataset_t=[],
                imdb_name_target=[],
                cfg_file=cfg_file,
                net=net)
    args = set_dataset_args(args)

    is_bgr = net in ['res101', 'res50', 'res152', 'vgg16']

    logger = LoggerForSacred(None, ex)

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    cfg.TRAIN.USE_FLIPPED = True
    cfg.USE_GPU_NMS = device == 'cuda'
    device = torch.device(device)

    load_id = re.findall(r"\d+", load_name)[0]
    output_dir = output_dir + "_{}".format(load_id)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    dataloader_s, _, imdb, _ = init_dataloaders_1s_mt(args, batch_size,
                                                      num_workers, is_bgr)

    session = 1
    fasterRCNN = init_htcn_model(LA_ATT,
                                 MID_ATT,
                                 class_agnostic,
                                 device,
                                 gc,
                                 imdb,
                                 lc,
                                 load_name,
                                 net,
                                 strict=False,
                                 pretrained=pretrained)

    # Pixel-level transfer module: two 1x1 convolutions mapping a 3-channel
    # image to a 3-channel output, so spatial size is preserved.
    dtm = nn.Sequential(nn.Conv2d(3, 256, 1, stride=1, padding=0, bias=False),
                        nn.ReLU(), nn.Conv2d(256, 3, 1))

    dtm.to(device)
    optimizer = torch.optim.SGD(dtm.parameters(), lr=lr, momentum=0.9)

    if torch.cuda.device_count() > 1:
        fasterRCNN = nn.DataParallel(fasterRCNN)

    iters_per_epoch = int(10000 / batch_size)

    if ef:
        FL = EFocalLoss(class_num=2, gamma=gamma)
    else:
        FL = FocalLoss(class_num=2, gamma=gamma)

    dtm_util.get_mask_for_target(args, FL, 0, dataloader_s, iters_per_epoch,
                                 fasterRCNN, dtm, optimizer, device, logger)

    find_id = re.findall(r"\d+", load_name)
    find_id = find_id[-1] if find_id else 0
    torch.save(
        dtm,
        os.path.join(output_dir,
                     'dtm_target_cnn_{}_{}.p'.format(load_id, find_id)))

    return 0
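
Hypothetical usage sketch for the saved module: because dtm is a plain nn.Sequential of standard layers, it can be reloaded with torch.load and applied to an image batch before the detector (the 1x1 convolutions preserve spatial size). The file name and the images tensor are placeholders:

dtm = torch.load('dtm_target_cnn_1_1.p')  # newer PyTorch may need weights_only=False
dtm.to(device).eval()
with torch.no_grad():
    transformed = dtm(images)  # images: (N, 3, H, W) float tensor on device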