Code example #1
def main():
    """Entrypoint for test_converter"""
    parser = argparse.ArgumentParser(description='Test Caffe converter')
    parser.add_argument('--cpu', action='store_true', help='use cpu?')
    parser.add_argument('--image_url', type=str,
                        default='https://github.com/dmlc/web-data/raw/master/mxnet/doc/'\
                                'tutorials/python/predict_image/cat.jpg',
                        help='input image to test inference, can be either file path or url')
    args = parser.parse_args()
    if args.cpu:
        gpus = [-1]
        default_batch_size = 32
    else:
        num_gpus = mx.context.num_gpus()
        assert num_gpus, 'At least one GPU is needed to run test_converter in GPU mode'
        gpus = list(range(num_gpus))
        default_batch_size = 32 * num_gpus

    models = ['bvlc_googlenet', 'vgg-16', 'resnet-50']

    val = download_data()
    for m in models:
        test_model_weights_and_outputs(m, args.image_url, gpus[0])
        # Build/testing machines tend to be short on GPU memory
        this_batch_size = default_batch_size // 4 if m == 'vgg-16' else default_batch_size
        test_imagenet_model_performance(m, val, gpus, this_batch_size)
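A note on the two GPU-discovery calls used across these examples: mx.context.num_gpus() returns the number of visible GPUs as a plain integer, while mx.test_utils.list_gpus() returns their device indices as a list, so when the former is used (as in example #1) the gpus list has to be built explicitly with range(). A minimal sketch, assuming mxnet is installed:

import mxnet as mx

num_gpus = mx.context.num_gpus()               # e.g. 2 on a two-GPU machine, 0 on a CPU-only one
gpus_from_count = list(range(num_gpus))        # -> [0, 1]
gpus_from_listing = mx.test_utils.list_gpus()  # -> [0, 1]; empty list when no GPU is visible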
Code example #2
def main():
    """Entrypoint for test_converter"""
    parser = argparse.ArgumentParser(description='Test Caffe converter')
    parser.add_argument('--cpu', action='store_true', help='use cpu?')
    parser.add_argument('--image_url', type=str,
                        default='https://github.com/dmlc/web-data/raw/master/mxnet/doc/'\
                                'tutorials/python/predict_image/cat.jpg',
                        help='input image to test inference, can be either file path or url')
    args = parser.parse_args()
    if args.cpu:
        gpus = [-1]
        default_batch_size = 32
    else:
        gpus = mx.test_utils.list_gpus()
        assert gpus, 'At least one GPU is needed to run test_converter in GPU mode'
        default_batch_size = 32 * len(gpus)

    models = ['bvlc_googlenet', 'vgg-16', 'resnet-50']

    val = download_data()
    for m in models:
        test_model_weights_and_outputs(m, args.image_url, gpus[0])
        # Build/testing machines tend to be short on GPU memory
        this_batch_size = default_batch_size // 4 if m == 'vgg-16' else default_batch_size
        test_imagenet_model_performance(m, val, gpus, this_batch_size)
Code example #3
File: test_converter.py Project: zackchase/mxnet
def main():
    """Entrypoint for test_converter"""
    parser = argparse.ArgumentParser(description='Test Caffe converter')
    parser.add_argument('--cpu', action='store_true', help='use cpu?')
    parser.add_argument('--image_url', type=str,
                        default='http://writm.com/wp-content/uploads/2016/08/Cat-hd-wallpapers.jpg',
                        help='input image to test inference, can be either file path or url')
    args = parser.parse_args()
    if args.cpu:
        gpus = [-1]
        batch_size = 32
    else:
        gpus = mx.test_utils.list_gpus()
        assert gpus, 'At least one GPU is needed to run test_converter in GPU mode'
        batch_size = 32 * len(gpus)

    models = ['bvlc_googlenet']

    val = download_data()
    for m in models:
        test_model_weights_and_outputs(m, args.image_url, gpus[0])
        test_imagenet_model_performance(m, val, gpus, batch_size)
Code example #4
def main():
    gpus = mx.test_utils.list_gpus()
    assert len(gpus) > 0
    batch_size = 32 * len(gpus)

    models = ['bvlc_googlenet', 'vgg-16', 'resnet-50']

    val = download_data()
    for m in models:
        test_imagenet_model(m, val, ','.join([str(i) for i in gpus]), batch_size)
Code example #5
File: test_converter.py Project: KeyKy/mxnet-old
def main():
    """Entrypoint for test_converter"""
    parser = argparse.ArgumentParser(description='Test Caffe converter')
    parser.add_argument('--cpu', action='store_true', help='use cpu?')
    args = parser.parse_args()
    if args.cpu:
        gpus = ''
        batch_size = 32
    else:
        gpus = mx.test_utils.list_gpus()
        assert gpus, 'At least one GPU is needed to run test_converter in GPU mode'
        batch_size = 32 * len(gpus)

    models = ['bvlc_googlenet', 'vgg-16', 'resnet-50']

    val = download_data()
    for m in models:
        test_imagenet_model(m, val, ','.join([str(i) for i in gpus]),
                            batch_size)
Code example #6
File: test_converter.py Project: Piyush3dB/mxnet
def main():
    """Entrypoint for test_converter"""
    parser = argparse.ArgumentParser(description='Test Caffe converter')
    parser.add_argument('--cpu', action='store_true', help='use cpu?')
    parser.add_argument('--image_url', type=str,
                        default='http://writm.com/wp-content/uploads/2016/08/Cat-hd-wallpapers.jpg',
                        help='input image to test inference, can be either file path or url')
    args = parser.parse_args()
    if args.cpu:
        gpus = [-1]
        batch_size = 32
    else:
        gpus = mx.test_utils.list_gpus()
        assert gpus, 'At least one GPU is needed to run test_converter in GPU mode'
        batch_size = 32 * len(gpus)

    models = ['bvlc_googlenet', 'vgg-16', 'resnet-50']

    val = download_data()
    for m in models:
        test_model_weights_and_outputs(m, args.image_url, gpus[0])
        test_imagenet_model_performance(m, val, gpus, batch_size)
Code example #7
    acc = [mx.metric.create('acc'), mx.metric.create('top_k_accuracy', top_k=5)]
    if isinstance(mean, str):
        mean_args = {'mean_img': mean}
    else:
        mean_args = {'rgb_mean': ','.join([str(i) for i in mean])}

    (speed, ) = score(model=(sym, arg_params, aux_params),
                      data_val=val,
                      label_name='prob_label',
                      metrics=acc,
                      gpus=gpus,
                      batch_size=batch_size,
                      max_num_examples=500,
                      **mean_args)
    logging.info('speed : %f image/sec', speed)
    for a in acc:
        logging.info(a.get())
    assert acc[0].get()[1] > meta_info['top-1-acc'] - 0.3
    assert acc[1].get()[1] > meta_info['top-5-acc'] - 0.3


if __name__ == '__main__':
    gpus = mx.test_utils.list_gpus()
    assert len(gpus) > 0
    batch_size = 32 * len(gpus)

    models = ['bvlc_googlenet', 'vgg-16', 'resnet-50']

    val = download_data()
    for m in models:
        test_imagenet_model(m, val, ','.join([str(i) for i in gpus]),
                            batch_size)
Code example #8
File: test_converter.py Project: Johnqczhang/mxnet
    acc = [mx.metric.create('acc'), mx.metric.create('top_k_accuracy', top_k=5)]
    if isinstance(mean, str):
        mean_args = {'mean_img': mean}
    else:
        mean_args = {'rgb_mean': ','.join([str(i) for i in mean])}

    (speed,) = score(model=(sym, arg_params, aux_params),
                     data_val=val,
                     label_name='prob_label',
                     metrics=acc,
                     gpus=gpus,
                     batch_size=batch_size,
                     max_num_examples=500,
                     **mean_args)
    logging.info('speed : %f image/sec', speed)
    for a in acc:
        logging.info(a.get())
    assert acc[0].get()[1] > meta_info['top-1-acc'] - 0.3
    assert acc[1].get()[1] > meta_info['top-5-acc'] - 0.3

if __name__ == '__main__':
    gpus = mx.test_utils.list_gpus()
    assert len(gpus) > 0
    batch_size = 32 * len(gpus)

    models = ['bvlc_googlenet', 'vgg-16', 'resnet-50']

    val = download_data()
    for m in models:
        test_imagenet_model(m, val, ','.join([str(i) for i in gpus]), batch_size)
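The accuracy assertions in examples #7 and #8 rely on an MXNet EvalMetric's get() returning a (name, value) pair; the [1] index pulls out the numeric value, which is compared against the recorded top-1/top-5 accuracy minus a 0.3 tolerance. A minimal sketch of that behaviour, assuming mxnet is installed:

import mxnet as mx

acc = mx.metric.create('acc')
acc.update(labels=[mx.nd.array([1, 0])],
           preds=[mx.nd.array([[0.1, 0.9], [0.8, 0.2]])])
print(acc.get())   # -> ('accuracy', 1.0); acc.get()[1] is the value used in the assertions above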