def main():
    # Register the Caffe2 contrib and Detectron operators before any nets
    # are constructed.
    c2_utils.import_contrib_ops()
    c2_utils.import_detectron_ops()
    # OpenCL may be enabled by default in OpenCV3; disable it because it's not
    # thread safe and causes unwanted GPU memory allocations.
    cv2.ocl.setUseOpenCL(False)
    args = parse_args()
    input_file = args.input_file
    workspace.GlobalInit(
        ['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1'])
    merge_cfg_from_file(
        '/home/LAB/wusj/exp/KL-Loss/configs/e2e_faster_rcnn_R-50-FPN_2x_entropy.yaml'
    )
    assert_and_infer_cfg(cache_urls=False)
    smi_output, cuda_ver, cudnn_ver = c2_utils.get_nvidia_info()
    logger.info("cuda version : {}".format(cuda_ver))
    logger.info("cudnn version: {}".format(cudnn_ver))
    logger.info("nvidia-smi output:\n{}".format(smi_output))
    logger.info('Running inference with config:')
    logger.info(pprint.pformat(cfg))
    workspace.ResetWorkspace()

    np.random.seed(cfg.RNG_SEED)
    # The task description is a JSON file whose path is passed on the command
    # line; it names the dataset, the trained model, and the candidate images.
    with open(input_file, 'r') as f:
        config = json.load(f)

        parameter = config['parameter']
        inference_id = parameter['inferenceId']

        dataset = parameter['dataSetName']
        model_file = parameter['modelPkl']
        task_id = inference_id
        image_id_list = parameter['imageIdList']

        load_data(dataset, image_id_list)
        roidb, result = test_net(model_file, dataset)

        # Rank the roidb entries with the user-supplied `compare` function
        # (Python 2 cmp-style sort).
        tmp = list(roidb)
        tmp.sort(cmp=compare)

        selectNum = parameter['selectNum']
        output = {}
        output['annotationList'] = []
        output['selectImageIdList'] = []
        output['remainImageIdList'] = []
        # The first `selectNum` ranked entries are selected for annotation;
        # the rest are reported back as remaining images.
        for i in range(selectNum):
            output['annotationList'].append(result[str(tmp[i]['id'])])
            output['selectImageIdList'].append(str(tmp[i]['id']))
        for i in range(selectNum, len(image_id_list)):
            output['remainImageIdList'].append(str(tmp[i]['id']))
        output['remainImageNum'] = len(output['remainImageIdList'])
        output['inferenceId'] = inference_id
        output['dataSetName'] = dataset

        result_output_dir = '/home/LAB/wusj/fastwash_tmp/inference/'
        with open(result_output_dir + 'result_' + task_id, 'wt') as f2:
            json.dump(output, f2)
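
The sort above relies on a `compare` helper that this listing does not show. A minimal sketch of such a Python 2 cmp-style comparator, assuming each roidb entry carries a per-image uncertainty score under a hypothetical 'entropy' key (the key name and the descending order are assumptions, not the original code):

# Hypothetical comparator for tmp.sort(cmp=compare): ranks entries by a
# per-image 'entropy' score, highest first. The score key is an assumption.
def compare(entry_a, entry_b):
    score_a = entry_a.get('entropy', 0.0)
    score_b = entry_b.get('entropy', 0.0)
    if score_a > score_b:
        return -1  # higher uncertainty sorts earlier
    elif score_a < score_b:
        return 1
    return 0

Under Python 3 the cmp argument no longer exists, so the equivalent ordering would be expressed as tmp.sort(key=lambda e: -e.get('entropy', 0.0)).
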
def main():
    c2_utils.import_contrib_ops()
    c2_utils.import_detectron_ops()
    cv2.ocl.setUseOpenCL(False)
    workspace.GlobalInit(
        ['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1'])
    merge_cfg_from_file(
        '/home/LAB/wusj/exp/KL-Loss/configs/e2e_faster_rcnn_R-50-FPN_2x_entropy.yaml'
    )
    assert_and_infer_cfg(cache_urls=False)
    smi_output, cuda_ver, cudnn_ver = c2_utils.get_nvidia_info()
    logger.info("cuda version : {}".format(cuda_ver))
    logger.info("cudnn version: {}".format(cudnn_ver))
    logger.info("nvidia-smi output:\n{}".format(smi_output))
    logger.info('Running inference with config:')
    logger.info(pprint.pformat(cfg))
    workspace.ResetWorkspace()

    np.random.seed(cfg.RNG_SEED)
    args = parse_args()
    input_file = args.input_file
    result_output_dir = args.output_dir

    # The task description is again a JSON file; in this variant the model
    # weights and the output directory come from the command line instead.
    with open(input_file, 'r') as f:
        config = json.load(f)

        dataset = config['dataSetName']
        model_file = args.model_file
        task_id = config['id']
        image_id_list = config['imageIdList']

        load_data(dataset, image_id_list)
        roidb, result = test_net(model_file, dataset)
        # Attach the inference result to the task description and write it
        # back out under the requested output directory.
        config['inferenceResult'] = result
        with open(result_output_dir + 'result_' + task_id, 'wt') as f2:
            json.dump(config, f2)
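
Neither variant shows its parse_args; the second one reads args.input_file, args.output_dir, and args.model_file. A minimal sketch of an argparse setup that would satisfy it (the flag names and help strings are assumptions, not the original code):

# Hypothetical argument parser for the second main() above; only the three
# attributes it actually reads are defined. Flag names are assumptions.
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='End-to-end inference')
    parser.add_argument('--input-file', dest='input_file', type=str,
                        required=True, help='JSON task description')
    parser.add_argument('--output-dir', dest='output_dir', type=str,
                        required=True, help='directory for the result_<id> file')
    parser.add_argument('--model-file', dest='model_file', type=str,
                        required=True, help='trained model weights (.pkl)')
    return parser.parse_args()
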
Example #3
import argparse

from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.core.config import get_output_dir
from detectron.utils.io import cache_url
from detectron.utils.logging import setup_logging
import detectron.core.test_engine as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
from detectron.datasets.dataset_catalog import get_im_dir
import detectron.utils.c2 as c2_utils
import detectron.utils.vis as vis_utils
from detectron.utils.tracking import Tracking, back_track, \
    infer_track_sequence, get_matlab_engine, eval_detections_matlab

# Register the Caffe2 contrib, Detectron, and project-specific custom ops
# before any nets are constructed.
c2_utils.import_contrib_ops()
c2_utils.import_detectron_ops()
c2_utils.import_custom_ops()


def parse_args():
    parser = argparse.ArgumentParser(description='End-to-end inference')
    parser.add_argument(
        '--cfg',
        dest='cfg',
        help='cfg model file (/path/to/model_config.yaml)',
        default=None,
        type=str
    )
    parser.add_argument(
        '--wts-pre',
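
The listing cuts this example off inside parse_args. As a hedged illustration of how the modules it imports (infer_engine, dummy_datasets, vis_utils) are typically used once the config has been merged and assert_and_infer_cfg has run, here is the standard Detectron single-image inference pattern; the weights path, image list, output directory, and threshold are placeholders, not this example's original code:

# Hedged sketch of the usual Detectron inference loop; not this example's
# original main(). Assumes cv2 and os are imported alongside the modules above.
def run_inference_sketch(weights_path, image_paths, output_dir):
    model = infer_engine.initialize_model_from_cfg(weights_path)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    for im_path in image_paths:
        im = cv2.imread(im_path)
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None
            )
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            os.path.basename(im_path),
            output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            thresh=0.7
        )
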
Example #4
import argparse
import cv2

from caffe2.proto import caffe2_pb2

from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.core.config import merge_cfg_from_list
from detectron.modeling import generate_anchors
from detectron.utils.logging import setup_logging
from detectron.utils.model_convert_utils import convert_op_in_proto
from detectron.utils.model_convert_utils import op_filter
import detectron.core.test_engine as test_engine
import detectron.utils.c2 as c2_utils
import detectron.utils.model_convert_utils as mutils
import detectron.utils.vis as vis_utils

c2_utils.import_contrib_ops()
c2_utils.import_detectron_ops()

# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)

logger = setup_logging(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description='Convert a trained network to pb format'
    )
    parser.add_argument(
        '--cfg', dest='cfg_file', help='optional config file', default=None,