Example #1
    def __init__(self,
                 myargv=None,
                 detectron_dir=os.path.expanduser(
                     os.path.join('~', 'libraries', 'detectron'))):
        workspace.GlobalInit(['caffe2'])
        setup_logging(__name__)
        self.logger = logging.getLogger(__name__)
        args = parse_args(myargv)

        # config
        # model_config_filename=os.path.join(detectron_dir, 'configs/12_2017_baselines/e2e_keypoint_rcnn_X-101-32x8d-FPN_s1x.yaml')
        model_config_filename = os.path.join(
            detectron_dir,
            'configs/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_s1x.yaml')
        args.cfg = model_config_filename
        merge_cfg_from_file(args.cfg)
        cfg.NUM_GPUS = 1

        # weights
        # model_weights = 'https://dl.fbaipublicfiles.com/detectron/37732318/12_2017_baselines/e2e_keypoint_rcnn_X-101-32x8d-FPN_s1x.yaml.16_55_09.Lx8H5JVu/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl'
        model_weights = 'https://dl.fbaipublicfiles.com/detectron/37697714/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_s1x.yaml.08_44_03.qrQ0ph6M/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl'
        cfg.DOWNLOAD_CACHE = os.path.join(detectron_dir,
                                          'detectron-download-cache')
        args.weights = cache_url(model_weights, cfg.DOWNLOAD_CACHE)

        assert_and_infer_cfg(cache_urls=False)

        assert not cfg.MODEL.RPN_ONLY, \
            'RPN models are not supported'
        assert not cfg.TEST.PRECOMPUTED_PROPOSALS, \
            'Models that require precomputed proposals are not supported'

        self.model = infer_engine.initialize_model_from_cfg(args.weights)
        self.dummy_coco_dataset = dummy_datasets.get_coco_dataset()
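A hedged follow-up to Example #1: the __init__ above only builds the model, so a companion inference method is sketched below. The method name infer and its placement are assumptions (the snippet does not show the rest of the class); c2_utils.NamedCudaScope and infer_engine.im_detect_all are the same Detectron calls used in the later examples.

    def infer(self, im):
        # Sketch only: not part of the original snippet. `im` is a BGR image
        # (e.g. from cv2.imread). Proposals are left to the RPN, so None is
        # passed, as in the other examples.
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                self.model, im, None)
        return cls_boxes, cls_segms, cls_keyps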
Example #2
def get_detectron_result(im):
    """
    Purpose: run detection on the input image.

    Args:
        im: image in BGR format
    Returns:
        img: image with the detection results drawn on it
        detectron_result_info: dictionary of detection results
    """
    setup_logging(__name__)
    logger = logging.getLogger(__name__)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    cfg_orig = load_cfg(yaml.dump(cfg))

    if rpn_pkl is not None:
        proposal_boxes, _proposal_scores = get_rpn_box_proposals(im)
        workspace.ResetWorkspace()
    else:
        proposal_boxes = None

    cls_boxes, cls_segms, cls_keyps = None, None, None
    pkl = rpn_pkl
    yml = rpn_cfg
    cfg.immutable(False)
    merge_cfg_from_cfg(cfg_orig)
    merge_cfg_from_file(yml)
    if len(pkl) > 0:
        weights_file = pkl
    else:
        weights_file = cfg.TEST.WEIGHTS
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg(cache_urls=False)
    model = model_engine.initialize_model_from_cfg(weights_file)
    with c2_utils.NamedCudaScope(0):
        cls_boxes_, cls_segms_, cls_keyps_ = \
            model_engine.im_detect_all(model, im, proposal_boxes)
    cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
    cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
    cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
    workspace.ResetWorkspace()

    img, detectron_result_info = vis_utils.vis_one_image_opencv(
        im,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dummy_coco_dataset,
        show_box=True,
        show_class=True)

    return img, detectron_result_info
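A hedged usage sketch for get_detectron_result(): the input and output paths are illustrative. The function expects a BGR image, which is exactly what cv2.imread returns.

import cv2

im = cv2.imread('images/street.jpg')                  # hypothetical input path
img, detectron_result_info = get_detectron_result(im)
cv2.imwrite('images/street_result.jpg', img)          # hypothetical output path
print(detectron_result_info)                          # dict of detection results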
Example #3
def main():
    # Initialize C2
    workspace.GlobalInit(
        ['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1'])
    # Set up logging and load config options
    logger = setup_logging(__name__)
    logging.getLogger('detectron.roi_data.loader').setLevel(logging.INFO)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    logger.info('Training with config:')
    logger.info(pprint.pformat(cfg))
    # Note that while we set the numpy random seed, network training will not be
    # deterministic in general. There are sources of non-determinism that cannot
    # be removed with a reasonable execution-speed tradeoff (such as certain
    # non-deterministic cudnn functions).
    np.random.seed(cfg.RNG_SEED)
    # Execute the training run
    checkpoints = detectron.utils.train.train_model()
    # Test the trained model
    if not args.skip_test:
        test_model(checkpoints['final'], args.multi_gpu_testing, args.opts)
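The config handling in this main() is the standard Detectron pattern: a YAML file selects the experiment, an optional flat list of KEY VALUE pairs overrides individual options, and assert_and_infer_cfg() validates and freezes the result. Below is a minimal sketch of that pattern in isolation, assuming the usual detectron.core.config imports; the YAML path and the overridden keys are only examples.

from detectron.core.config import (assert_and_infer_cfg, cfg,
                                   merge_cfg_from_file, merge_cfg_from_list)

merge_cfg_from_file('configs/my_experiment.yaml')                   # example path
merge_cfg_from_list(['NUM_GPUS', '1', 'TRAIN.IMS_PER_BATCH', '2'])  # example overrides
assert_and_infer_cfg()
print(cfg.NUM_GPUS, cfg.TRAIN.IMS_PER_BATCH)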
Example #4
def main():
    # Initialize C2
    workspace.GlobalInit(
        ['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1'])
    # Set up logging and load config options
    logger = setup_logging(__name__)
    logging.getLogger('detectron.roi_data.loader').setLevel(logging.INFO)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    smi_output, cuda_ver, cudnn_ver = c2_utils.get_nvidia_info()
    logger.info("cuda version : {}".format(cuda_ver))
    logger.info("cudnn version: {}".format(cudnn_ver))
    logger.info("nvidia-smi output:\n{}".format(smi_output))
    logger.info('Training with config:')
    logger.info(pprint.pformat(cfg))
    # Note that while we set the numpy random seed, network training will not be
    # deterministic in general. There are sources of non-determinism that cannot
    # be removed with a reasonable execution-speed tradeoff (such as certain
    # non-deterministic cudnn functions).
    np.random.seed(cfg.RNG_SEED)
    # Execute the training run
    checkpoints = detectron.utils.train_wsl.train_model()
    # Test the trained model
    if not args.skip_test:
        test_model(checkpoints['final'], args.multi_gpu_testing, args.opts)
        print('reprint snapshot name for the result: ', checkpoints['final'])

        if 'voc_' in cfg.TRAIN.DATASETS[0]:
            TEST_DATASETS = cfg.TEST.DATASETS
            TEST_PROPOSAL_FILES = cfg.TEST.PROPOSAL_FILES
            cfg.immutable(False)
            cfg.TEST.DATASETS = cfg.TRAIN.DATASETS
            cfg.TEST.PROPOSAL_FILES = cfg.TRAIN.PROPOSAL_FILES
            cfg.immutable(True)
            test_model(checkpoints['final'], args.multi_gpu_testing, args.opts)
            print('reprint snapshot name for the result: ',
                  checkpoints['final'])

            cfg.immutable(False)
            cfg.TEST.DATASETS = TEST_DATASETS
            cfg.TEST.PROPOSAL_FILES = TEST_PROPOSAL_FILES
            cfg.immutable(True)

        cfg.immutable(False)
        cfg.TEST.BBOX_AUG.ENABLED = False
        cfg.VIS = False
        cfg.immutable(True)

        _ = checkpoints.pop('final', None)
        for snapshot in sorted(checkpoints.keys(), reverse=True):
            test_model(checkpoints[snapshot], args.multi_gpu_testing,
                       args.opts)
            print('reprint snapshot name for the result: ', snapshot,
                  checkpoints[snapshot])
Example #5
def main():
    # Initialize C2
    workspace.GlobalInit(
        ['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1']
    )
    # Set up logging and load config options
    logger = setup_logging(__name__)
    logging.getLogger('detectron.roi_data.loader').setLevel(logging.INFO)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    smi_output, cuda_ver, cudnn_ver = c2_utils.get_nvidia_info()
    logger.info("cuda version : {}".format(cuda_ver))
    logger.info("cudnn version: {}".format(cudnn_ver))
    logger.info("nvidia-smi output:\n{}".format(smi_output))
    logger.info('Training with config:')
    logger.info(pprint.pformat(cfg))
    # Note that while we set the numpy random seed, network training will not be
    # deterministic in general. There are sources of non-determinism that cannot
    # be removed with a reasonable execution-speed tradeoff (such as certain
    # non-deterministic cudnn functions).
    np.random.seed(cfg.RNG_SEED)
    # Execute the training run
    checkpoints = detectron.utils.train.train_model()
    # Test the trained model
    if not args.skip_test:
        test_model(checkpoints['final'], args.multi_gpu_testing, args.opts)
Example #6
    def beginTrain(self):
        workspace.GlobalInit([
            'caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1'
        ])
        # Set up logging and load config options
        logger = setup_logging(__name__)
        logging.getLogger('detectron.roi_data.loader').setLevel(logging.INFO)
        smi_output, cuda_ver, cudnn_ver = c2_utils.get_nvidia_info()
        logger.info("cuda version : {}".format(cuda_ver))
        logger.info("cudnn version: {}".format(cudnn_ver))
        logger.info("nvidia-smi output:\n{}".format(smi_output))
        logger.info('Training with config:')
        logger.info(pformat(cfg))
        # Note that while we set the numpy random seed, network training will not be
        # deterministic in general. There are sources of non-determinism that cannot
        # be removed with a reasonable execution-speed tradeoff (such as certain
        # non-deterministic cudnn functions).
        np.random.seed(cfg.RNG_SEED)
        # Execute the training run
        checkpoints, losses = train_model()
        # Test the trained model
        self.test_model(checkpoints["final"])
        dataset_name, _ = get_inference_dataset(0)
        output_dir = get_output_dir(dataset_name, training=False)
        with open(osp.join(output_dir, "res.pkl"), "rb") as src:
            mAP = pickle.load(src)

        return losses, mAP
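beginTrain() returns the per-iteration losses together with the mAP read back from res.pkl. A sketch of how a caller might consume them follows; the owning class and its constructor are not shown in the snippet, so DetectronTrainer is hypothetical, and losses is assumed to be a sequence.

trainer = DetectronTrainer()   # hypothetical: the class that defines beginTrain()
losses, mAP = trainer.beginTrain()
print('final loss: {}, mAP: {}'.format(losses[-1], mAP))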
Example #7
        resume_weights_file = f
        weights_file = os.path.join(cfg.TEST.WEIGHTS, resume_weights_file)
        logger.info(
            '========> Resuming from checkpoint {} at iter {}'.format(
                weights_file, checkpoint_iter)
        )

        run_inference(
            weights_file,
            ind_range=args.range,
            multi_gpu_testing=args.multi_gpu_testing,
            check_expected_results=True,
        )


if __name__ == '__main__':
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    logger = setup_logging(__name__)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    logger.info('Testing with config:')
    logger.info(pprint.pformat(cfg))

    checkNewCheckpoint(args, cfg, logger)
Example #8
                        ["det/proposals.pkl"])), 'r'))
                else:
                    proposals = None
                head, tail = os.path.split(output_file) 
                if not os.path.exists(head):
                    os.makedirs(head)
                start = time.time()
                # Run inference
                infer_track_sequence(model, im_dir, tracking, proposals=proposals,
                    vis=vis, det_file=output_file)
                delta = time.time() - start
                freq = float(len(os.listdir(im_dir))) / delta
                timing.append(freq)

            # Save evaluation results
            if EVAL:
                val_directory = os.path.abspath(head) + "/"
                eval_datections(val_directory,
                    os.path.abspath(os.path.join(*im_dir.split("/")[:-2])) + "/")
                with open(val_directory + "eval.txt", "r") as f:
                    temp = f.readline().strip()
                with open(val_directory + "eval.txt", "w+") as f:
                    f.write("{},{}".format(temp, np.average(timing)))


if __name__ == '__main__':
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    setup_logging(__name__)
    args = parse_args()
    main(args)
Example #9
        B = np.random.randn(2, 3, 5).astype(np.float32)
        self._run_test(A, B, check_grad=True)

    def test_large_forward(self):
        A = np.random.randn(2, 256, 42, 100).astype(np.float32)
        B = np.random.randn(2, 256, 35, 87).astype(np.float32)
        self._run_test(A, B)

        A = np.random.randn(2, 256, 42, 87).astype(np.float32)
        B = np.random.randn(2, 256, 35, 87).astype(np.float32)
        self._run_test(A, B)

    def test_size_exceptions(self):
        A = np.random.randn(2, 256, 42, 86).astype(np.float32)
        B = np.random.randn(2, 256, 35, 87).astype(np.float32)
        with self.assertRaises(RuntimeError):
            self._run_test(A, B)

        A = np.random.randn(2, 255, 42, 88).astype(np.float32)
        B = np.random.randn(2, 256, 35, 87).astype(np.float32)
        with self.assertRaises(RuntimeError):
            self._run_test(A, B)


if __name__ == '__main__':
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    c2_utils.import_detectron_ops()
    assert 'SpatialNarrowAs' in workspace.RegisteredOperators()
    logging_utils.setup_logging(__name__)
    unittest.main()
Example #10
        A = np.random.randn(2, 3, 5, 7).astype(np.float32)
        I = np.array([0, 1], dtype=np.int32)
        self._run_op_test(A, I, check_grad=True)

        A = np.random.randn(2, 3, 5, 7).astype(np.float32)
        I = np.array([1, 0], dtype=np.int32)
        self._run_op_test(A, I, check_grad=True)

        A = np.random.randn(10, 3, 5, 7).astype(np.float32)
        I = np.array(np.random.permutation(10), dtype=np.int32)
        self._run_op_test(A, I, check_grad=True)

    def test_size_exceptions(self):
        A = np.random.randn(2, 256, 42, 86).astype(np.float32)
        I = np.array(np.random.permutation(10), dtype=np.int32)
        with self.assertRaises(RuntimeError):
            self._run_op_test(A, I)

    # See doc string in _run_speed_test
    # def test_perf(self):
    #     with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
    #         self._run_speed_test()


if __name__ == '__main__':
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    c2_utils.import_detectron_ops()
    assert 'BatchPermutation' in workspace.RegisteredOperators()
    logging_utils.setup_logging(__name__)
    unittest.main()
Example #11
                roi_data_loader._minibatch_queue.qsize(),
                cfg.DATA_LOADER.MINIBATCH_QUEUE_SIZE
            )
        )
        # Sleep to simulate the time taken by running a little network
        time.sleep(opts.sleep_time)
        # To inspect:
        # blobs = workspace.FetchBlobs(all_blobs)
        # from IPython import embed; embed()
    logger.info('Shutting down data loader...')
    roi_data_loader.shutdown()


if __name__ == '__main__':
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    logger = setup_logging(__name__)
    logger.setLevel(logging.DEBUG)
    logging.getLogger('detectron.roi_data.loader').setLevel(logging.INFO)
    np.random.seed(cfg.RNG_SEED)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    logger.info('Running with config:')
    logger.info(pprint.pformat(cfg))
    main(args)
Example #12
    return args


def do_reval(dataset_name, output_dir, args):
    dataset = JsonDataset(dataset_name)
    dets = load_object(os.path.join(output_dir, 'detections.pkl'))

    # Override config with the one saved in the detections file
    if args.cfg_file is not None:
        core_config.merge_cfg_from_cfg(core_config.load_cfg(dets['cfg']))
    else:
        core_config._merge_a_into_b(core_config.load_cfg(dets['cfg']), cfg)
    results = task_evaluation.evaluate_all(
        dataset,
        dets['all_boxes'],
        dets['all_segms'],
        dets['all_keyps'],
        output_dir,
        use_matlab=args.matlab_eval
    )
    task_evaluation.log_copy_paste_friendly_results(results)


if __name__ == '__main__':
    setup_logging(__name__)
    args = parse_args()
    if args.comp_mode:
        cfg.TEST.COMPETITION_MODE = True
    output_dir = os.path.abspath(args.output_dir[0])
    do_reval(args.dataset_name, output_dir, args)
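do_reval() re-scores a previously saved detections.pkl against a dataset. A small sketch of inspecting such a file before re-evaluating it; the path is hypothetical, and the keys listed are the ones do_reval() reads above.

from detectron.utils.io import load_object

dets = load_object('/tmp/detectron-output/test/detections.pkl')  # hypothetical path
print(sorted(dets.keys()))  # expected to include 'all_boxes', 'all_segms', 'all_keyps', 'cfg'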
Example #13
def main():
    # Initialize C2
    workspace.GlobalInit(
        ['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1'])
    # Set up logging and load config options
    logger = setup_logging(__name__)
    logging.getLogger('detectron.roi_data.loader').setLevel(logging.INFO)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    smi_output, cuda_ver, cudnn_ver = c2_utils.get_nvidia_info()
    logger.info("cuda version : {}".format(cuda_ver))
    logger.info("cudnn version: {}".format(cudnn_ver))
    logger.info("nvidia-smi output:\n{}".format(smi_output))
    logger.info('Training with config:')
    logger.info(pprint.pformat(cfg))
    # Note that while we set the numpy random seed, network training will not be
    # deterministic in general. There are sources of non-determinism that cannot
    # be removed with a reasonable execution-speed tradeoff (such as certain
    # non-deterministic cudnn functions).
    np.random.seed(cfg.RNG_SEED)
    # test model
    logger.info("creat test model ...")
    test_model = test_engine.initialize_model_from_cfg(cfg.TEST.WEIGHTS,
                                                       gpu_id=0)
    logger.info("created test model ...")
    train_data = DataLoader(root,
                            "train_id.txt",
                            cfg,
                            test_model,
                            is_train=True)
    # create model
    model, weights_file, start_iter, checkpoints = create_model(
        True, cfg, output_dir)
    # test blob
    print(workspace.Blobs())
    # create input blob
    blob_names = ['data_stage2', 'gt_label_stage2']
    for gpu_id in range(cfg.NUM_GPUS):
        with c2_utils.NamedCudaScope(gpu_id):
            for blob_name in blob_names:
                workspace.CreateBlob(core.ScopedName(blob_name))
    # Override random weight initialization with weights from a saved model
    if weights_file:
        nu.initialize_gpu_from_weights_file(model, weights_file, gpu_id=0)
    # Even if we're randomly initializing we still need to synchronize
    # parameters across GPUs
    nu.broadcast_parameters(model)
    workspace.CreateNet(model.net)

    logger.info('Outputs saved to: {:s}'.format(os.path.abspath(output_dir)))
    dump_proto_files(model, output_dir)

    writer = SummaryWriter(log_dir=output_dir)
    training_stats = TrainingStats(model, writer)
    CHECKPOINT_PERIOD = int(cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS)
    logger.info("start train ...")
    for cur_iter in range(start_iter, cfg.SOLVER.MAX_ITER):
        # feed data
        # print("{} iter starting feed data...".format(cur_iter))
        data_stage2, gt_label = train_data.next_batch()
        with c2_utils.NamedCudaScope(gpu_id):
            workspace.FeedBlob(core.ScopedName('data_stage2'), data_stage2)
            workspace.FeedBlob(core.ScopedName('gt_label_stage2'), gt_label)

        # print("workspace.RunNet(model.net.Proto().name)")
        training_stats.IterTic()
        lr = model.UpdateWorkspaceLr(cur_iter,
                                     lr_policy.get_lr_at_iter(cur_iter))
        workspace.RunNet(model.net.Proto().name)
        if cur_iter == start_iter:
            nu.print_net(model)
        training_stats.IterToc()
        training_stats.UpdateIterStats(cur_iter)
        training_stats.LogIterStats(cur_iter, lr)
        writer.add_scalar('learning_rate', lr, cur_iter)

        # print("end of RunNet")
        if (cur_iter + 1) % CHECKPOINT_PERIOD == 0 and cur_iter > start_iter:
            checkpoints[cur_iter] = os.path.join(
                output_dir, 'model_iter{}.pkl'.format(cur_iter))
            nu.save_model_to_weights_file(checkpoints[cur_iter], model)

        if cur_iter == start_iter + training_stats.LOG_PERIOD:
            # Reset the iteration timer to remove outliers from the first few
            # SGD iterations
            training_stats.ResetIterTimer()

        if np.isnan(training_stats.iter_total_loss):
            handle_critical_error(model, 'Loss is NaN')

    # Save the final model
    checkpoints['final'] = os.path.join(output_dir, 'model_final.pkl')
    nu.save_model_to_weights_file(checkpoints['final'], model)
    # save train loss and metric
    state_file = os.path.join(output_dir, 'training_state.json')
    training_stats.SaveTrainingStates(state_file)
    # Execute the training run
    checkpoints = detectron.utils.train.train_model()
    # Test the trained model
    if not args.skip_test:
        test_model(checkpoints['final'], args.multi_gpu_testing, args.opts)
Example #14
def main():
    # Initialize C2
    workspace.GlobalInit(
        ['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1'])
    # Set up logging and load config options
    logger = setup_logging(__name__)
    logging.getLogger('detectron.roi_data.loader').setLevel(logging.INFO)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)

    assert_and_infer_cfg()
    logger.info('Training with config:')
    logger.info(pprint.pformat(cfg))
    # Note that while we set the numpy random seed, network training will not be
    # deterministic in general. There are sources of non-determinism that cannot
    # be removed with a reasonable execution-speed tradeoff (such as certain
    # non-deterministic cudnn functions).
    np.random.seed(cfg.RNG_SEED)
    # Execute the training run

    fs = open('imgnames.pkl', 'rb')
    roidbnames = pickle.load(fs)
    fs.close()

    logger.info('Loading dataset: {}'.format(cfg.TRAIN.DATASETS))

    dataset_names = cfg.TRAIN.DATASETS
    proposal_files = cfg.TRAIN.PROPOSAL_FILES

    roidb = get_training_roidb(dataset_names, proposal_files)

    logger.info('{:d} roidb entries'.format(len(roidb)))

    total_num = len(roidb)

    # bitmap marking which roidb indices are selected for training
    bitmapRoidb = BitMap(total_num)

    # initial samples
    #    initial_num = int(total_num*0.2)
    #    for i in range(initial_num):
    #        bitmapRoidb.set(i)
    #
    #    train_roidb = [roidb[i] for i in range(initial_num)]

    initialidx = []
    train_roidb = []

    for i, x in enumerate(roidb):
        if x['image'].split('/')[-1] in roidbnames:
            initialidx.append(i)
            train_roidb.append(x)

    for i in initialidx:
        bitmapRoidb.set(i)

    logger.info('{:d} initial roidb entries'.format(len(train_roidb)))
    # append flipped images
    train_roidb = flipped_roidb_for_training(train_roidb)

    logger.info('{:d} roidb entries after adding flipped images'.format(
        len(train_roidb)))
    alamount = 0
    ssamount = 0
    gamma = 0.95
    # control al proportion
    al_proportion_checkpoint = [
        int(x * total_num * 0.4) for x in np.linspace(0.2, 1, 10)
    ]
    # control ss proportion
    ss_proportion_checkpoint = [
        int(x * total_num) for x in np.linspace(0.2, 2, 10)
    ]

    next_iters = 90000
    sum_iters = next_iters
    '''load the latest checkpoints'''
    checkpoints = detectron.utils.train.train_model(sum_iters, train_roidb,
                                                    cfg.TRAIN.WEIGHTS)
    while True:
        # to do a test on the test dataset
        test_model(checkpoints[(sum_iters - 1)], args.multi_gpu_testing,
                   args.opts)
        if sum_iters > cfg.SOLVER.MAX_ITER:
            break
        # next detect unlabeled samples
        unlabeledidx = list(set(range(total_num)) - set(bitmapRoidb.nonzero()))
        # labeled samples
        labeledidx = list(set(bitmapRoidb.nonzero()))
        # detect unlabeled samples
        BBoxes, YClass, Scores, al_candidate_idx, ALScore = detect_im(
            checkpoints[(sum_iters - 1)],
            roidb,
            gamma,
            idxs=unlabeledidx,
            gpu_id=0)

        al_avg_idx = np.argsort(np.array(ALScore))
        al_candidate_idx = [al_candidate_idx[i] for i in al_avg_idx]

        gamma = max(gamma - 0.05, 0.7)

        # the ss candidate idx
        ss_candidate_idx = [
            i for i in unlabeledidx if i not in al_candidate_idx
        ]

        # update roidb for next training
        train_roidb = replace_roidb(roidb, BBoxes, YClass, ss_candidate_idx)

        # control the proportion
        if alamount + len(al_candidate_idx) >= al_proportion_checkpoint[0]:
            al_candidate_idx = al_candidate_idx[:int(
                al_proportion_checkpoint[0] - alamount)]
            tmp = al_proportion_checkpoint.pop(0)
            al_proportion_checkpoint.append(al_proportion_checkpoint[-1])
        if ssamount + len(ss_candidate_idx) >= ss_proportion_checkpoint[0]:
            ss_candidate_idx = ss_candidate_idx[:int(
                ss_proportion_checkpoint[0] - ssamount)]
            tmp = ss_proportion_checkpoint.pop(0)
            ss_proportion_checkpoint.append(ss_proportion_checkpoint[-1])

        # record ss and al factor

        alamount += len(al_candidate_idx)
        ssamount += len(ss_candidate_idx)

        logger.info('alfactor:{},ssfactor:{}'.format(alamount / total_num,
                                                     ssamount / total_num))

        #       for idx in al_candidate_idx:
        #            bitmapRoidb.set(idx)
        next_train_idx = bitmapRoidb.nonzero()
        next_train_idx.extend(ss_candidate_idx)

        train_roidb = blur_image(train_roidb, ss_candidate_idx)
        # the next training roidb
        train_roidb = [train_roidb[i] for i in next_train_idx]
        # flipped the roidb
        train_roidb = flipped_roidb_for_training(train_roidb)
        # the next training iters
        next_iters = 30000
        sum_iters += next_iters
        checkpoints = detectron.utils.train.train_model(
            sum_iters, train_roidb, checkpoints[(sum_iters - next_iters - 1)])
Example #15
def main():
    # Initialize C2
    workspace.GlobalInit(
        ['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1']
    )
    # Set up logging and load config options
    logger = setup_logging(__name__)
    logging.getLogger('detectron.roi_data.loader').setLevel(logging.INFO)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    smi_output, cuda_ver, cudnn_ver = c2_utils.get_nvidia_info()
    logger.info("cuda version : {}".format(cuda_ver))
    logger.info("cudnn version: {}".format(cudnn_ver))
    logger.info("nvidia-smi output:\n{}".format(smi_output))
    logger.info('Training with config:')
    logger.info(pprint.pformat(cfg))
    # Note that while we set the numpy random seed, network training will not be
    # deterministic in general. There are sources of non-determinism that cannot
    # be removed with a reasonable execution-speed tradeoff (such as certain
    # non-deterministic cudnn functions).
    np.random.seed(cfg.RNG_SEED)
    # test model
    logger.info("creat test model ...")
    test_model = test_engine.initialize_model_from_cfg(cfg.TEST.WEIGHTS, gpu_id=0)
    logger.info("created test model ...")
    #cfg.TRAIN.IMS_PER_BATCH = 1
    train_data = DataLoader(root, "val_id.txt", cfg, test_model, is_train=False)
    # create model
    model, weights_file, start_iter, checkpoints = create_model(False, cfg, output_dir)
    # test blob
    print(workspace.Blobs())
    # create input blob
    blob_names = ['data_stage2']
    for gpu_id in range(cfg.NUM_GPUS):
        with c2_utils.NamedCudaScope(gpu_id):
            for blob_name in blob_names:
                workspace.CreateBlob(core.ScopedName(blob_name))
    # Override random weight initialization with weights from a saved model
    if weights_file:
        nu.initialize_gpu_from_weights_file(model, weights_file, gpu_id=0)
    # Even if we're randomly initializing we still need to synchronize
    # parameters across GPUs
    nu.broadcast_parameters(model)
    workspace.CreateNet(model.net)

    logger.info('Outputs saved to: {:s}'.format(os.path.abspath(output_dir)))

    logger.info("start test ...")
    save_root = os.path.join(output_dir, 'fusion')
    if not os.path.exists(save_root):
        os.makedirs(save_root)
    for cur_iter in range(10000):
        # feed data
        # print("{} iter starting feed data...".format(cur_iter))
        data_stage2, gt_label, meta = train_data.next_batch()
        '''#
        print('input0-20 sunglasses max score:', np.max(data_stage2[0, 4, :, :]))
        print('input20-40 sunglasses max score:', np.max(data_stage2[0, 24, :, :]))
        print('input0-20 gloves max score:', np.max(data_stage2[0, 3, :, :]))
        print('input20-40 gloves max score:', np.max(data_stage2[0, 23, :, :]))
        #'''
        with c2_utils.NamedCudaScope(gpu_id):
            workspace.FeedBlob(core.ScopedName('data_stage2'), data_stage2)

        # print("workspace.RunNet(model.net.Proto().name)")
        with c2_utils.NamedCudaScope(gpu_id):
            workspace.RunNet(model.net.Proto().name)
            batch_probs = workspace.FetchBlob(core.ScopedName('probs_human_NCHW_stage2'))
            batch_probs = batch_probs.transpose((0, 2, 3, 1))
        assert len(meta) == batch_probs.shape[0]
        #print('batch_probs shape:', batch_probs.shape)
        for i in range(len(meta)):
            probs = cv2.resize(batch_probs[i], (meta[i]['width'], meta[i]['height']), interpolation=cv2.INTER_LINEAR)
            probs = probs.transpose((2,0,1))
            print('sunglasses max score:', np.max(probs[4, :, :]))
            print('gloves max score:', np.max(probs[3, :, :]))
            #print('probs shape:', probs.shape)
            cv2.imwrite(os.path.join(save_root, meta[i]['id']+'.png'), probs.argmax(0))
        print("prossed ", cur_iter)
Example #16
def main():
    # Initialize C2
    workspace.GlobalInit(
        ['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1'])
    # Set up logging and load config options
    logger = setup_logging(__name__)
    logging.getLogger('detectron.roi_data.loader').setLevel(logging.INFO)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    smi_output, cuda_ver, cudnn_ver = c2_utils.get_nvidia_info()
    logger.info("cuda version : {}".format(cuda_ver))
    logger.info("cudnn version: {}".format(cudnn_ver))
    logger.info("nvidia-smi output:\n{}".format(smi_output))
    logger.info('Training with config:')
    logger.info(pprint.pformat(cfg))
    # Note that while we set the numpy random seed, network training will not be
    # deterministic in general. There are sources of non-determinism that cannot
    # be removed with a reasonable execution-speed tradeoff (such as certain
    # non-deterministic cudnn functions).
    np.random.seed(cfg.RNG_SEED)
    # Execute the training run

    if os.path.exists('./detectron/datasets/data/coco'):
        shutil.rmtree('./detectron/datasets/data/coco')
    os.makedirs('./detectron/datasets/data/coco')

    if 'dior_2nd' in cfg.OUTPUT_DIR:
        os.system(
            'ln -s /home/wsh/dior/coco/coco_train2014 ./detectron/datasets/data/coco/coco_train2014'
        )
        os.system(
            'ln -s /home/wsh/dior/coco/coco_val2014 ./detectron/datasets/data/coco/coco_val2014 '
        )
        os.system(
            'ln -s /home/wsh/dior/coco/annotationsN_2nd ./detectron/datasets/data/coco/annotations'
        )
    elif 'dior_3rd' in cfg.OUTPUT_DIR:
        os.system(
            'ln -s /home/wsh/dior/coco/coco_train2014 ./detectron/datasets/data/coco/coco_train2014'
        )
        os.system(
            'ln -s /home/wsh/dior/coco/coco_val2014 ./detectron/datasets/data/coco/coco_val2014 '
        )
        os.system(
            'ln -s /home/wsh/dior/coco/annotationsN_3rd ./detectron/datasets/data/coco/annotations'
        )
    elif 'dior_4th' in cfg.OUTPUT_DIR:
        os.system(
            'ln -s /home/wsh/dior/coco/coco_train2014 ./detectron/datasets/data/coco/coco_train2014'
        )
        os.system(
            'ln -s /home/wsh/dior/coco/coco_val2014 ./detectron/datasets/data/coco/coco_val2014 '
        )
        os.system(
            'ln -s /home/wsh/dior/coco/annotationsN_4th ./detectron/datasets/data/coco/annotations'
        )
    elif 'dior_5th' in cfg.OUTPUT_DIR:
        os.system(
            'ln -s /home/wsh/dior/coco/coco_train2014 ./detectron/datasets/data/coco/coco_train2014'
        )
        os.system(
            'ln -s /home/wsh/dior/coco/coco_val2014 ./detectron/datasets/data/coco/coco_val2014 '
        )
        os.system(
            'ln -s /home/wsh/dior/coco/annotationsN_5th ./detectron/datasets/data/coco/annotations'
        )
    elif '2020.10.6' in cfg.OUTPUT_DIR:
        os.system(
            'ln -s /home/wsh/dior/coco/coco_train2014 ./detectron/datasets/data/coco/coco_train2014'
        )
        os.system(
            'ln -s /home/wsh/dior/coco/coco_val2014 ./detectron/datasets/data/coco/coco_val2014 '
        )
        os.system(
            'ln -s /home/wsh/dior/coco/annotationsN ./detectron/datasets/data/coco/annotations'
        )
    else:
        raise Exception

    checkpoints = detectron.utils.train.train_model()
    # Test the trained model
    if not args.skip_test:
        test_model(checkpoints['final'], args.multi_gpu_testing, args.opts)