Example #1
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--flops', action='store_true', help='print flops and exit')
    args = parser.parse_args()

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if args.v2 and args.group != parser.get_default('group'):
        logger.error("group= is not used in ShuffleNetV2!")

    model = Model()

    if args.eval:
        batch = 128    # something that can run on one gpu
        ds = get_data('val', batch)
        eval_on_ILSVRC12(model, get_model_loader(args.load), ds)
    elif args.flops:
        # manually build the graph with batch=1
        input_desc = [
            InputDesc(tf.float32, [1, 224, 224, 3], 'input'),
            InputDesc(tf.int32, [1], 'label')
        ]
        input = PlaceholderInput()
        input.setup(input_desc)
        with TowerContext('', is_training=False):
            model.build_graph(*input.get_input_tensors())
        model_utils.describe_trainable_vars()

        tf.profiler.profile(
            tf.get_default_graph(),
            cmd='op',
            options=tf.profiler.ProfileOptionBuilder.float_operation())
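
If the aggregated count is needed programmatically rather than read off the printed report, the profiler's return value can be captured. A minimal standalone sketch, assuming TensorFlow 1.x and a graph already built as in the snippet above:

# Minimal sketch (TensorFlow 1.x): tf.profiler.profile returns a proto whose
# total_float_ops field carries the aggregated FLOPs count.
import tensorflow as tf

opts = tf.profiler.ProfileOptionBuilder.float_operation()
flops = tf.profiler.profile(tf.get_default_graph(), cmd='op', options=opts)
print("Total FLOPs (as counted by tf.profiler):", flops.total_float_ops)
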
Example #2
        "Note that it's best to keep per-GPU batch size in [32, 64] to obtain the best accuracy."
        "Pretrained models listed in README were trained with batch=32x8.")
    args = parser.parse_args()

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    bit_actn, bit_weight = None, None
    if args.quant:
        bit_actn, bit_weight = args.quant_bit_actn, args.quant_bit_weight

    model = Model(args.use_fp16, bit_actn, bit_weight)
    model.data_format = args.data_format
    if args.eval:
        batch = 128  # something that can run on one gpu
        ds = get_data('val', batch)
        eval_on_ILSVRC12(model, get_model_loader(args.load), ds)
    else:
        if args.fake:
            logger.set_logger_dir(os.path.join('train_log', 'tmp'), 'd')
        else:
            logger.set_logger_dir(
                os.path.join('train_log',
                             'imagenet-darknet-batch{}'.format(args.batch)))

        config = get_config(model, fake=args.fake)
        if args.load:
            config.session_init = get_model_loader(args.load)
        trainer = SyncMultiGPUTrainerReplicated(max(get_num_gpu(), 1))
        launch_train_with_config(config, trainer)
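
The snippet above starts mid-way through its argument parser. As a hedged reconstruction for context, the flags below are inferred from the args.* attributes it reads; the names follow from argparse's attribute mapping, but the defaults and help strings are illustrative only:

# Hypothetical reconstruction of the flags referenced above; inferred, not
# copied from the original script. Defaults and help strings are illustrative.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma-separated list of GPU ids')
parser.add_argument('--load', help='checkpoint or model file to load')
parser.add_argument('--data_format', default='NCHW')
parser.add_argument('--batch', type=int, default=32, help='per-GPU batch size')
parser.add_argument('--use_fp16', action='store_true')
parser.add_argument('--fake', action='store_true', help='use fake data for a quick run')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--quant', action='store_true', help='enable quantization')
parser.add_argument('--quant_bit_actn', type=int, default=8)
parser.add_argument('--quant_bit_weight', type=int, default=8)
args = parser.parse_args()
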
Example #3
            raise
        logger.info("Name Transform: " + k + ' --> ' + newname)
        resnet_param[newname] = v
    return resnet_param


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--load', required=True,
                        help='.npy model file generated by tensorpack.utils.loadcaffe')
    parser.add_argument('-d', '--depth', help='resnet depth', required=True, type=int, choices=[50, 101, 152])
    parser.add_argument('--input', help='an input image')
    parser.add_argument('--convert', help='npz output file to save the converted model')
    parser.add_argument('--eval', help='ILSVRC dir to run validation on')

    args = parser.parse_args()
    DEPTH = args.depth

    param = np.load(args.load, encoding='latin1', allow_pickle=True).item()  # the .npy stores a pickled dict of parameters
    param = convert_param_name(param)

    if args.convert:
        assert args.convert.endswith('.npz')
        np.savez_compressed(args.convert, **param)

    if args.eval:
        ds = get_imagenet_dataflow(args.eval, 'val', 128, get_inference_augmentor())
        eval_on_ILSVRC12(Model(), DictRestore(param), ds)
    elif args.input:
        run_test(param, args.input)
Example #4
        '--load',
        required=True,
        help='.npz model file generated by tensorpack.utils.loadcaffe')
    parser.add_argument('-d',
                        '--depth',
                        help='resnet depth',
                        required=True,
                        type=int,
                        choices=[50, 101, 152])
    parser.add_argument('--input', help='an input image')
    parser.add_argument('--convert',
                        help='npz output file to save the converted model')
    parser.add_argument('--eval', help='ILSVRC dir to run validation on')

    args = parser.parse_args()
    DEPTH = args.depth

    param = dict(np.load(args.load))
    param = convert_param_name(param)

    if args.convert:
        assert args.convert.endswith('.npz')
        np.savez_compressed(args.convert, **param)

    if args.eval:
        ds = get_imagenet_dataflow(args.eval, 'val', 128,
                                   get_inference_augmentor())
        eval_on_ILSVRC12(Model(), DictRestore(param), ds)
    elif args.input:
        run_test(param, args.input)
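
For completeness, a minimal sketch of how the restored dictionary could be wired into inference with tensorpack's predictor API. PredictConfig, OfflinePredictor and DictRestore are real tensorpack classes, but the tensor names 'input' and 'prob' are assumptions rather than values taken from the original script, and Model/param refer to the definitions above:

# Minimal sketch: build an offline predictor from the converted parameters.
# 'input' and 'prob' are assumed tensor names, not taken from the original code.
from tensorpack.predict import PredictConfig, OfflinePredictor
from tensorpack.tfutils.sessinit import DictRestore

pred_config = PredictConfig(
    model=Model(),                    # the Model class defined earlier in the script
    session_init=DictRestore(param),  # restore weights from the converted dict
    input_names=['input'],            # assumed input tensor name
    output_names=['prob'])            # assumed output tensor name
predictor = OfflinePredictor(pred_config)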