Example #1
def train(
    work_dir,
    model_cfg,
    loss_cfg,
    dataset_cfg,
    optimizer_cfg,
    total_epochs,
    training_hooks,
    batch_size=None,
    gpu_batch_size=None,
    workflow=[('train', 1)],
    gpus=-1,
    log_level=0,
    workers=4,
    resume_from=None,
    load_from=None,
):

    # calculate batch size
    if gpus < 0:
        gpus = torch.cuda.device_count()
    if (batch_size is None) and (gpu_batch_size is not None):
        batch_size = gpu_batch_size * gpus
    assert batch_size is not None, 'Please specify batch_size or gpu_batch_size.'

    # prepare data loaders
    if isinstance(dataset_cfg, dict):
        dataset_cfg = [dataset_cfg]
    data_loaders = [
        torch.utils.data.DataLoader(dataset=call_obj(**d),
                                    batch_size=batch_size,
                                    shuffle=True,
                                    num_workers=workers,
                                    drop_last=True) for d in dataset_cfg
    ]

    # put model on gpus
    if isinstance(model_cfg, list):
        model = [call_obj(**c) for c in model_cfg]
        model = torch.nn.Sequential(*model)
    else:
        model = call_obj(**model_cfg)
    model.apply(weights_init)

    model = MMDataParallel(model, device_ids=range(gpus)).cuda()
    loss = call_obj(**loss_cfg)

    # build runner
    optimizer = call_obj(params=model.parameters(), **optimizer_cfg)
    runner = Runner(model, batch_processor, optimizer, work_dir, log_level)
    runner.register_training_hooks(**training_hooks)

    if resume_from:
        runner.resume(resume_from)
    elif load_from:
        runner.load_checkpoint(load_from)

    # run
    workflow = [tuple(w) for w in workflow]
    runner.run(data_loaders, workflow, total_epochs, loss=loss)
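
The recurring call_obj(**cfg) helper is not shown in these snippets; in mmskeleton-style codebases it typically instantiates an object from a config dict whose 'type' field names the class to build. A minimal sketch of that idea, assuming 'type' holds a dotted import path (the helper name and signature here are illustrative, not the project's actual implementation):

import importlib

def call_obj(type, **kwargs):
    # 'type' (shadowing the builtin to match the config key) is a dotted path, e.g. 'torch.optim.SGD'
    module_name, _, obj_name = type.rpartition('.')
    cls = getattr(importlib.import_module(module_name), obj_name)
    return cls(**kwargs)

With such a helper, optimizer_cfg=dict(type='torch.optim.SGD', lr=0.1) would resolve to torch.optim.SGD(params=model.parameters(), lr=0.1) in the call above.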
Example #2
def train(
    work_dir,
    model_cfg,
    loss_cfg,
    dataset_cfg,
    optimizer_cfg,
    batch_size,
    total_epochs,
    training_hooks,
    workflow=[('train', 1)],
    gpus=1,
    log_level=0,
    workers=2,
    resume_from=None,
    load_from=None,
):

    # prepare data loaders
    if isinstance(dataset_cfg, dict):
        dataset_cfg = [dataset_cfg]
    data_loaders = [
        torch.utils.data.DataLoader(dataset=call_obj(**d),
                                    batch_size=batch_size,
                                    shuffle=True,
                                    num_workers=workers,
                                    drop_last=True) for d in dataset_cfg
    ]

    # put model on gpus
    if isinstance(model_cfg, list):
        model = [call_obj(**c) for c in model_cfg]
        model = torch.nn.Sequential(*model)
    else:
        model = call_obj(**model_cfg)
    model.apply(weights_init)
    print("Model size: ",
          sum(p.numel() for p in model.parameters() if p.requires_grad))
    model = MMDataParallel(model, device_ids=range(gpus)).cuda()
    loss = call_obj(**loss_cfg)

    # build runner
    optimizer = call_obj(params=model.parameters(), **optimizer_cfg)
    runner = Runner(model, batch_processor, optimizer, work_dir, log_level)
    runner.register_training_hooks(**training_hooks)

    if resume_from:
        runner.resume(resume_from)
    elif load_from:
        runner.load_checkpoint(load_from)

    # run
    workflow = [tuple(w) for w in workflow]
    runner.run(data_loaders, workflow, total_epochs, loss=loss)

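    # writer is assumed to be a module-level tensorboardX SummaryWriter created elsewhere in this file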
    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()
Example #3
def train(
    work_dir,
    model_cfg,
    dataset_cfg,
    batch_size,
    optimizer_cfg,
    total_epochs,
    training_hooks,
    workflow=[('train', 1)],
    gpus=1,
    log_level=0,
    workers=4,
    resume_from=None,
    load_from=None,
):
    # prepare data loaders
    if isinstance(dataset_cfg, dict):
        dataset_cfg = [dataset_cfg]
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    data_loaders = [
        torch.utils.data.DataLoader(
            dataset=call_obj(**d,
                             transform=transforms.Compose([
                                 transforms.ToTensor(),
                                 normalize,
                             ])),
            batch_size=batch_size * gpus,
            shuffle=True,
            num_workers=workers,
            drop_last=True) for d in dataset_cfg
    ]

    # put model on gpus
    if isinstance(model_cfg, list):
        model = [call_obj(**c) for c in model_cfg]
        model = torch.nn.Sequential(*model)
    else:
        model = call_obj(**model_cfg)
    model = MMDataParallel(model, device_ids=range(gpus)).cuda()
    # build runner
    optimizer = call_obj(params=model.parameters(), **optimizer_cfg)
    runner = Runner(model, batch_processor, optimizer, work_dir, log_level)
    runner.register_training_hooks(**training_hooks)

    if resume_from:
        runner.resume(resume_from)
    elif load_from:
        runner.load_checkpoint(load_from)
    # run
    workflow = [tuple(w) for w in workflow]
    runner.run(data_loaders, workflow, total_epochs)
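
The workflow argument in these train() functions follows the mmcv Runner convention: a list of (mode, epochs) phases that the runner cycles through until total_epochs is reached. As an illustration (not taken from the examples above), a workflow that runs one validation epoch after each training epoch would be:

workflow = [('train', 1), ('val', 1)]

Note also that in this variant batch_size is a per-GPU value (it is multiplied by gpus when the DataLoader is built), whereas Example #1 exposes the per-GPU size as a separate gpu_batch_size argument.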
Example #4
    def __init__(self, cfg):
        super(TrackerCnnModule, self).__init__()
        model = build_detector(cfg.model,
                               train_cfg=cfg.train_cfg,
                               test_cfg=cfg.test_cfg)
        checkpoint = glob.glob(os.path.join(mmdet_dir, work_dir, '*.pth'))[0]
        load_checkpoint(model, checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        self.bbox_roi_extractor = builder.build_roi_extractor(
            cfg.bbox_roi_extractor)

        for param in model.parameters():
            param.requires_grad = False

        self.detector = model.module
        self.flatten = Flatten()
        self.cfg = cfg
Example #5
def train_flownet(model,
                  dataset,
                  cfg,
                  distributed=False,
                  validate=False,
                  logger=None):
    if logger is None:
        logger = get_root_logger(cfg.log_level)

    # start training
    # prepare data loaders
    data_loaders = [
        build_dataloader(dataset,
                         cfg.data.imgs_per_gpu,
                         cfg.data.workers_per_gpu,
                         cfg.gpus,
                         dist=False)
    ]

    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()

    # build runner
    runner = Runner(model, batch_processor, cfg.optimizer, cfg.work_dir,
                    cfg.log_level)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    # if cfg.resume_from:
    #     runner.resume(cfg.resume_from)
    # elif cfg.load_from:
    #     runner.load_checkpoint(cfg.load_from)
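    # freeze the whole network, then re-enable gradients and train mode only for the optical-flow head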
    model.eval()
    for param in model.parameters():
        param.requires_grad = False
    # model.load_flow()
    model.module.flow_head.train()
    for param in model.module.flow_head.parameters():
        param.requires_grad = True
    # training
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
Example #6
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_recognizer(cfg.model,
                                 train_cfg=None,
                                 test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[1])

        params = list(model.parameters())
        weight_softmax = np.squeeze(
            params[-2].data.cpu().numpy())  # fully connected layer weights, already moved to numpy

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs, inputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    #print(len(features_blobs))
    #print(features_blobs[0].size())

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    num_videos = len(outputs)
    class_name = 'YoYo'
    os.mkdir('data/CAM_imgs/' + class_name)

    for k in range(0, num_videos):
        os.mkdir('data/CAM_imgs/' + class_name + '/CAMs_{:02d}'.format(k))
        idx = get_top_5_index("tools/results.pkl",
                              k)  # change the dir of results.pkl to tools/
        conv_feat = pickle.load(open(
            "tools/hook_features/feat_{:02d}.pkl".format(k), 'rb'),
                                encoding='utf-8')
        conv_feat = conv_feat.cpu().numpy()
        CAMs = returnCAM(
            conv_feat, weight_softmax,
            [idx[0]
             ])  # generate class activation mapping for the top1 prediction
        single_input = inputs[k].numpy()
        writeCAMs(class_name, CAMs, single_input, k)

    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

    if args.use_softmax:
        print("Averaging score over {} clips with softmax".format(
            outputs[0].shape[0]))
        results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
    else:
        print("Averaging score over {} clips without softmax (i.e., raw)".format(
            outputs[0].shape[0]))
        results = [res.mean(axis=0) for res in outputs]
    top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
    mean_acc = mean_class_accuracy(results, gt_labels)
    print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
    print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
    print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
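
returnCAM is defined elsewhere in this script; it is presumably the standard class activation mapping computation (a weighted sum of the last conv feature maps, as in the original CAM recipe). A minimal sketch under that assumption, with illustrative shapes and normalization (the original may differ):

import cv2
import numpy as np

def returnCAM(feature_conv, weight_softmax, class_idx, size_upsample=(256, 256)):
    # feature_conv: (C, H, W) activations of the last conv layer
    C, H, W = feature_conv.shape
    cams = []
    for idx in class_idx:
        # weighted sum over channels -> (H*W,), reshaped back to (H, W)
        cam = weight_softmax[idx].dot(feature_conv.reshape(C, H * W)).reshape(H, W)
        cam = cam - cam.min()
        cam = np.uint8(255 * cam / (cam.max() + 1e-12))  # normalize to 8-bit for visualization
        cams.append(cv2.resize(cam, size_upsample))
    return cams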
Example #7
def _non_dist_train(model,
                    dataset,
                    cfg,
                    validate=False,
                    logger=None,
                    timestamp=None,
                    meta=None):
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(ds,
                         cfg.data.imgs_per_gpu,
                         cfg.data.workers_per_gpu,
                         len(cfg.gpu_ids),
                         dist=False,
                         seed=cfg.seed) for ds in dataset
    ]
    # put model on gpus
    model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)

    # build runner
    if cfg.optimizer.type == 'SGD_GC':
        optimizer = SGD_GC(model.parameters(),
                           cfg.optimizer.lr,
                           momentum=cfg.optimizer.momentum,
                           weight_decay=cfg.optimizer.weight_decay)
    else:
        optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model,
                    batch_processor,
                    optimizer,
                    cfg.work_dir,
                    logger=logger,
                    meta=meta)
    # an ugly workaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp
    # fp16 setting
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config,
                                             **fp16_cfg,
                                             distributed=False)
    else:
        optimizer_config = cfg.optimizer_config
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    # register eval hooks
    if validate:
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(
            val_dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        runner.register_hook(EvalHook(val_dataloader, **eval_cfg))

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
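
The fp16 block read via cfg.get('fp16', None) above is, in mmdetection-style configs, a small dict whose contents are forwarded to Fp16OptimizerHook together with optimizer_config. A typical entry (the loss-scale value is illustrative):

fp16 = dict(loss_scale=512.)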
Example #8
def _non_dist_train(model, dataset, cfg, validate=False):
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = []
    # for idx, ds in enumerate(dataset):
    #     if idx==0:
    #         data_loaders.append(build_dataloader(
    #             ds,
    #             cfg.data.imgs_per_gpu,
    #             cfg.data.workers_per_gpu,
    #             cfg.gpus,
    #             dist=False,
    #             cls_balanced_sampler=False))
    #     else:
    #         data_loaders.append(build_dataloader(
    #             ds,
    #             cfg.data.imgs_per_gpu,
    #             cfg.data.workers_per_gpu,
    #             cfg.gpus,
    #             dist=False,
    #             cls_balanced_sampler=True))
    data_loaders.append(
        build_dataloader(dataset[0],
                         cfg.data.imgs_per_gpu,
                         cfg.data.workers_per_gpu,
                         cfg.gpus,
                         dist=False,
                         cls_balanced_sampler=True))

    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()

    load_checkpoint(model, cfg.load_from)
    print('load from {}'.format(cfg.load_from))
    model.eval()
    for param in model.parameters():
        param.requires_grad = False

    # for param in model.parameters():
    #     param.requires_grad = False
    # model.eval()

    dataset_to_longtailmodel = {}
    dataset_to_longtailmodel['train'] = data_loaders[0]
    # dataset_to_longtailmodel['train_plain'] = data_loaders[1]
    # dataset_to_longtailmodel['val'] = data_loaders[2]

    ## which to use
    import datetime
    # cls_models = {
    #               1: '2fc_rand',
    #               2: '3fc_rand',
    #               3: '3fc_ft' }

    use_model = cfg.use_model
    print('use {}'.format(cfg.use_model))
    exp_prefix = cfg.exp_prefix
    total_epoch = 120
    initial_lr = 0.01

    if hasattr(dataset[0], 'coco'):
        if use_model == '2fc_rand':
            cls_head = simple2fc(num_classes=81).cuda()
        elif use_model in ('3fc_rand', '3fc_ft'):
            cls_head = simple3fc(num_classes=81).cuda()

    elif hasattr(dataset[0], 'lvis'):
        if use_model == '2fc_rand':
            cls_head = simple2fc(num_classes=1231).cuda()
        elif use_model in ('3fc_rand', '3fc_ft'):
            cls_head = simple3fc(num_classes=1231).cuda()

    optimizer = optim.SGD([{
        'params': cls_head.parameters(),
        'lr': initial_lr
    }])

    # for param in list(cls_head.parameters())[:-4]:
    #     param.requires_grad = False

    def save_ckpt(cls_head):
        save_checkpoint(
            cls_head, './{}/{}_{}.pth'.format(cfg.work_dir, exp_prefix,
                                              use_model))
        torch.save(
            epoch, './{}/{}_{}_epoch.pth'.format(cfg.work_dir, exp_prefix,
                                                 use_model))

    def load_ckpt(cls_head, use_model):

        if use_model == '2fc_rand' or use_model == '3fc_rand':
            if not os.path.exists('./{}_{}.pth'.format(exp_prefix, use_model)):
                print('start training from 0 epoch')
                return 0
            else:
                epoch = torch.load('./{}_{}_epoch.pth'.format(
                    exp_prefix, use_model))
                load_checkpoint(cls_head,
                                './{}_{}.pth'.format(exp_prefix, use_model))
                return epoch

        elif use_model == '3fc_ft':
            if not os.path.exists('./{}_{}.pth'.format(exp_prefix, use_model)):
                print(
                    'start training from 0 epoch, init from orig 3fc cls head')

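                # remap the detector's shared-fc bbox_head weights onto the standalone 3-fc classifier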
                orig_head_state_dict = model.module.bbox_head.state_dict()
                key_map = {
                    'fc_cls.weight': 'feat_classifier.fc_classifier.weight',
                    'fc_cls.bias': 'feat_classifier.fc_classifier.bias',
                    'shared_fcs.0.weight': 'feat_classifier.fc1.weight',
                    'shared_fcs.0.bias': 'feat_classifier.fc1.bias',
                    'shared_fcs.1.weight': 'feat_classifier.fc2.weight',
                    'shared_fcs.1.bias': 'feat_classifier.fc2.bias'
                }
                new_state_dict = OrderedDict()

                for key, value in orig_head_state_dict.items():
                    if key in key_map:
                        new_key = key_map[key]
                        new_state_dict[new_key] = value

                cls_head.load_state_dict(new_state_dict)
                return 0
            else:
                epoch = torch.load('./{}_{}_epoch.pth'.format(
                    exp_prefix, use_model))
                load_checkpoint(cls_head,
                                './{}_{}.pth'.format(exp_prefix, use_model))
                return epoch

    epoch = load_ckpt(cls_head, use_model)
    cls_head = MMDataParallel(cls_head, device_ids=range(cfg.gpus)).cuda()

    for epoch in range(epoch + 1, total_epoch + 1):

        # due to a scheduler bug, we apply the lr schedule manually
        if epoch >= 8:
            for param_group in optimizer.param_groups:
                param_group['lr'] = initial_lr * 0.1
        if epoch >= 11:
            for param_group in optimizer.param_groups:
                param_group['lr'] = initial_lr * 0.01
        print('epoch {} lr {}'.format(epoch, optimizer.param_groups[0]['lr']))

        for step, data_batch in enumerate(dataset_to_longtailmodel['train']):
            if step % 10 == 0:
                print('step {} time: {}'.format(step, datetime.datetime.now()))
                torch.cuda.empty_cache()

            bs = dataset_to_longtailmodel['train'].batch_size
            nc = dataset_to_longtailmodel['train'].sampler.nc
            data_cls_index = np.split(
                dataset_to_longtailmodel['train'].sampler.class_indices,
                dataset_to_longtailmodel['train'].sampler.episode, 0)[step]

            neg_feats, gt_inds, gt_num, pos_feats, pos_label = model(
                **data_batch)
            samples_convert, label_converts = compose_training_data(
                neg_feats, gt_inds, gt_num, pos_feats, pos_label,
                data_cls_index, bs, nc)

            logits = cls_head(samples_convert)

            log_p_y = F.log_softmax(logits, dim=1).view(logits.size(0), -1)

            loss_target = -log_p_y.gather(
                1, label_converts.unsqueeze(1)).squeeze().view(-1).mean()

            loss_val = 1.0 * loss_target

            _, y_hat = log_p_y.max(1)
            acc_val = torch.eq(y_hat, label_converts).float().mean()
            bg_acc = (y_hat[label_converts == 0]
                      == 0).sum() / (label_converts == 0).sum().float()
            fg_acc = torch.eq(
                y_hat, label_converts).float()[label_converts != 0].mean()

            if step % 10 == 0:
                print('step {} acc: {}'.format(step, acc_val.item()))
                print('step {} bg acc: {}'.format(step, bg_acc.item()))
                print('step {} fg acc: {}'.format(step, fg_acc.item()))

            optimizer.zero_grad()
            loss_val.backward()

            optimizer.step()

        save_ckpt(cls_head)
Example #9
def main():
    args = parse_args()
    cfg = Config.fromfile(args.cfg)
    work_dir = cfg.work_dir
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
        str(device_id) for device_id in cfg.device_ids)
    log_dir = os.path.join(work_dir, 'logs')
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logger = init_logger(log_dir)
    seed = cfg.seed
    logger.info('Set random seed to {}'.format(seed))
    set_random_seed(seed)

    train_dataset = get_dataset(cfg.data.train)
    train_data_loader = build_dataloader(
        train_dataset,
        cfg.data.imgs_per_gpu,
        cfg.data.workers_per_gpu,
        len(cfg.device_ids),
        dist=False,
    )
    val_dataset = get_dataset(cfg.data.val)
    val_data_loader = build_dataloader(val_dataset,
                                       1,
                                       cfg.data.workers_per_gpu,
                                       1,
                                       dist=False,
                                       shuffle=False)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    model = MMDataParallel(model).cuda()
    optimizer = obj_from_dict(cfg.optimizer, torch.optim,
                              dict(params=model.parameters()))
    lr_scheduler = obj_from_dict(cfg.lr_scedule, LRschedule,
                                 dict(optimizer=optimizer))

    checkpoint_dir = os.path.join(cfg.work_dir, 'checkpoint_dir')
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    start_epoch = cfg.start_epoch
    if cfg.resume_from:
        checkpoint = load_checkpoint(model, cfg.resume_from)
        start_epoch = 0
        logger.info('resumed epoch {}, from {}'.format(start_epoch,
                                                       cfg.resume_from))

    log_buffer = LogBuffer()
    for epoch in range(start_epoch, cfg.end_epoch):
        train(train_data_loader, model, optimizer, epoch, lr_scheduler,
              log_buffer, cfg, logger)
        tmp_checkpoint_file = os.path.join(checkpoint_dir, 'tmp_val.pth')
        meta_dict = cfg._cfg_dict
        logger.info('save tmp checkpoint to {}'.format(tmp_checkpoint_file))
        save_checkpoint(model, tmp_checkpoint_file, optimizer, meta=meta_dict)
        if len(cfg.device_ids) == 1:
            sensitivity = val(val_data_loader, model, cfg, logger, epoch)
        else:
            model_args = cfg.model.copy()
            model_args.update(train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
            model_type = getattr(detectors, model_args.pop('type'))
            results = parallel_test(
                cfg,
                model_type,
                model_args,
                tmp_checkpoint_file,
                val_dataset,
                np.arange(len(cfg.device_ids)).tolist(),
                workers_per_gpu=1,
            )

            sensitivity = evaluate_deep_lesion(results, val_dataset,
                                               cfg.cfg_3dce, logger)
        save_file = os.path.join(
            checkpoint_dir, 'epoch_{}_sens@4FP_{:.5f}_{}.pth'.format(
                epoch + 1, sensitivity,
                time.strftime('%m-%d-%H-%M', time.localtime(time.time()))))
        os.rename(tmp_checkpoint_file, save_file)
        logger.info('save checkpoint to {}'.format(save_file))
        if epoch > cfg.lr_scedule.T_max:
            os.remove(save_file)