Example #1
import argparse
import sys

import torch
from ptflops import get_model_complexity_info

# Registry mapping model names to their constructors (excerpt; DABNet and FPENet
# are model classes defined elsewhere in the repository).
pt_models = {
    'DABNet': DABNet,
    'FPENet': FPENet
}

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ptflops sample script')
    parser.add_argument('--device',
                        type=int,
                        default=0,
                        help='Device to store the model.')
    parser.add_argument('--model',
                        choices=list(pt_models.keys()),
                        type=str,
                        default='ENet')
    parser.add_argument('--result', type=str, default=None)
    args = parser.parse_args()

    if args.result is None:
        ost = sys.stdout
    else:
        ost = open(args.result, 'w')

    with torch.cuda.device(args.device):
        net = pt_models[args.model](classes=19).cuda()

        flops, params = get_model_complexity_info(net, (3, 512, 1024),
                                                  as_strings=True,
                                                  print_per_layer_stat=True,
                                                  ost=ost)
        print('Flops: ' + flops)
        print('Params: ' + params)
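
Example #1 assumes a CUDA device is available. As a minimal sketch (reusing the same ptflops call and the DABNet constructor from the registry above), the measurement also runs on CPU by dropping the torch.cuda.device context and the .cuda() call:

import sys
from ptflops import get_model_complexity_info

# CPU-only variant of the measurement in Example #1 (no CUDA context, no .cuda()).
net = DABNet(classes=19)
flops, params = get_model_complexity_info(net, (3, 512, 1024),
                                          as_strings=True,
                                          print_per_layer_stat=False,
                                          ost=sys.stdout)
print('Flops: ' + flops)
print('Params: ' + params)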
Example #2
import torch
from fvcore.nn import flop_count
from ptflops import get_model_complexity_info
from thop import profile

# `summary` and `DF1SegX38` are assumed to come from the repository's own
# utils/model modules; they are not provided by the libraries imported above.

if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = DF1SegX38(classes=19).to(device)
    summary(model, (3, 352, 480))
    x = torch.randn(2, 3, 512, 1024).to(device)

    from fvcore.nn.jit_handles import batchnorm_flop_jit
    from fvcore.nn.jit_handles import generic_activation_jit

    supported_ops = {
        "aten::batch_norm": batchnorm_flop_jit,
    }
    flop_dict, _ = flop_count(model, (x, ), supported_ops)

    flops_count, params_count = get_model_complexity_info(
        model, (3, 512, 1024), as_strings=False, print_per_layer_stat=True)
    macs, params = profile(model, inputs=(x, ))
    print(flop_dict)
    print(flops_count, params_count)
    print(macs, params)
'''
Sample output (truncated):
  UserWarning: nn.functional.upsample is deprecated. Use nn.functional.interpolate instead.
  UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0.
    Please specify align_corners=True if the old behavior is desired.
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1          [-1, 3, 353, 481]              36
            ...
'''
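
The three counters in Example #2 report in different units: fvcore's flop_count returns a per-operator dict roughly in units of 10^9 operations (a fused multiply-add counted once), while ptflops with as_strings=False and thop return raw multiply-accumulate counts. Note also that x has batch size 2, so the fvcore and thop figures cover two images while ptflops measures a single 3x512x1024 input. A minimal sketch, under those assumptions, to bring the three results onto a common GMac scale:

# Hypothetical post-processing of the three measurements printed above.
fvcore_gmacs = sum(flop_dict.values())    # fvcore: per-operator counts, already in units of 1e9
ptflops_gmacs = flops_count / 1e9         # ptflops: raw multiply-accumulate count
thop_gmacs = macs / 1e9                   # thop: raw multiply-accumulate count
print('fvcore: {:.2f} GMac  ptflops: {:.2f} GMac  thop: {:.2f} GMac'.format(
    fvcore_gmacs, ptflops_gmacs, thop_gmacs))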
Example #3

import sys

import torch
from ptflops import get_model_complexity_info

# `summary` and the `models` registry (name -> constructor) are assumed to come
# from the repository's own modules.

if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # for i in [1,2,4,6,8,10,12]:
    for i in [16]:
        img_h, img_w = 32 * i, 64 * i
        for name, network in models.items():
            print(name, network)
            model = network(classes=11).to(device)

            total_params, trainable_params, total_input_size, total_output_size, total_params_size, total_size = summary(
                model, (3, img_h, img_w))

            # time.sleep(1)

            flops, params = get_model_complexity_info(
                model, (3, img_h, img_w),
                as_strings=False,
                print_per_layer_stat=True,
                ost=sys.stdout)
            # print('Flops: ' + flops)
            # print('Params: ' + params)
            result = dict()
            result['net'] = name
            result['size'] = '{}x{}'.format(img_h, img_w)
            result['Flops'] = flops / (10**9)
            result['params'] = params / (10**6)
            result['params_A'] = total_params.item()
            result['params_T'] = trainable_params.item()
            result['Input(MB)'] = total_input_size
            result['F/B (MB)'] = total_output_size
            result['Params(MB)'] = total_params_size
            result['Total(MB)'] = total_size
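
The excerpt in Example #3 ends before the per-model result dictionaries are written anywhere. A minimal sketch (the rows list and the save_results helper below are hypothetical, not part of the original script) of collecting the rows and dumping them to CSV:

import csv

# Hypothetical helper: write the collected `result` rows (one per model and input size) to CSV.
def save_results(rows, path='complexity_results.csv'):
    if not rows:
        return
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=list(rows[0].keys()))
        writer.writeheader()
        writer.writerows(rows)

# Usage: append each `result` to a list inside the loop above, then call save_results(rows) at the end.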