Example #1
    def add_hooks(m: nn.Module):
        # if hasattr(m, "total_ops") or hasattr(m, "total_params"):
        #     logger.warning("Either .total_ops or .total_params is already defined in %s. "
        #                    "Be careful, it might change your code's behavior." % m._get_name())
        m.register_buffer('total_ops', torch.zeros(1))
        m.register_buffer('total_params', torch.zeros(1))

        for p in m.parameters():
            m.total_params += torch.Tensor([p.numel()])

        m_type = type(m)
        fn = None

        # If a module type is defined in both op maps, custom_ops takes priority.
        if m_type in custom_ops:
            fn = custom_ops[m_type]
        elif m_type in register_hooks:
            fn = register_hooks[m_type]

        if fn is None:
            if verbose:
                logger.info("THOP has not implemented counting method for %s." % m._get_name())
        else:
            if verbose:
                logger.info("Register FLOP counter for module %s." % m._get_name())
            handler_collection[m] = m.register_forward_hook(fn)
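
For context on how these hooks fire: a forward hook receives the module, its inputs, and its output after every forward pass, so the counter function can accumulate into the total_ops buffer registered above. A minimal sketch for nn.Linear follows; count_linear and the one-multiply-accumulate-per-weight-use formula are illustrative, not THOP's exact implementation.

import torch
import torch.nn as nn

def count_linear(m: nn.Linear, x, y):
    # One multiply-accumulate per (input feature, output element) pair.
    m.total_ops += m.in_features * y.numel()

layer = nn.Linear(128, 64)
layer.register_buffer('total_ops', torch.zeros(1))
handle = layer.register_forward_hook(count_linear)
layer(torch.randn(32, 128))
handle.remove()
print(layer.total_ops)  # tensor([262144.]) = 32 * 64 * 128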
Example #2
import numpy as np
import torch
import torch.nn as nn
from functools import reduce


def modelsize(model: nn.Module, input: torch.Tensor, type_size=4):
    '''
    Estimate a model's memory footprint (parameters and intermediate activations).
    :param model: the model to inspect
    :param input: a Tensor shaped like the model's real input
    :param type_size: bytes per element; defaults to 4 (float32)
    :return:
    '''
    para = sum([np.prod(list(p.size())) for p in model.parameters()])
    print('Model {} : params: {:.4f}M'.format(model._get_name(), para * type_size / 1000 / 1000))

    input_ = input.clone()
    input_.requires_grad_(requires_grad=False)

    mods = list(model.modules())
    out_sizes = []

    # Walk the flattened module list and feed each layer's output to the next;
    # this only makes sense for purely sequential models.
    for i in range(1, len(mods)):
        m = mods[i]
        print(m)
        if isinstance(m, nn.ReLU):
            if m.inplace:
                continue  # an in-place ReLU allocates no new intermediate tensor
        out = m(input_)
        out_sizes.append(np.array(out.size()))
        input_ = out

    total_nums = reduce(lambda x, y: x + np.prod(y), out_sizes, 0)
    print('Model {} : intermediate variables: {:.3f} M (without backward)'
          .format(model._get_name(), total_nums * type_size / 1000 / 1000))
    print('Model {} : intermediate variables: {:.3f} M (with backward)'
          .format(model._get_name(), total_nums * type_size * 2 / 1000 / 1000))
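
A quick usage sketch for modelsize on a small, purely sequential model; the layer sizes are arbitrary, and walking model.modules() as above assumes each submodule consumes the previous submodule's output.

import torch
import torch.nn as nn

net = nn.Sequential(
    nn.Linear(128, 256),
    nn.ReLU(inplace=True),  # skipped above: in-place, no new intermediate tensor
    nn.Linear(256, 10),
)
modelsize(net, torch.randn(32, 128))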
Example #3
    def repr_with_statistics(module: nn.Module, name: str) -> str:
        # We treat the extra repr like the sub-module, one item per line
        extra_lines = []
        extra_repr = module.extra_repr()
        printed_stats = print_statistics(name)
        # empty string will be split into list ['']
        if extra_repr:
            extra_lines.extend(extra_repr.split("\n"))
        if printed_stats:
            extra_lines.extend(printed_stats.split("\n"))
        child_lines = []
        for key, submod in module._modules.items():
            submod_name = name + ("." if name else "") + key
            # pyre-fixme[6]: Expected `Module` for 1st param but got
            #  `Optional[nn.modules.module.Module]`.
            submod_str = repr_with_statistics(submod, submod_name)
            submod_str = _addindent(submod_str, 2)
            child_lines.append("(" + key + "): " + submod_str)
        lines = extra_lines + child_lines

        main_str = module._get_name() + "("
        if lines:
            # simple one-liner info, which most builtin Modules will use
            if len(extra_lines) == 1 and not child_lines:
                main_str += extra_lines[0]
            else:
                main_str += "\n  " + "\n  ".join(lines) + "\n"

        main_str += ")"
        return main_str
Example #4
    def repr_with_statistics(module: nn.Module, name: str) -> str:
        # We treat the extra repr like the sub-module, one item per line
        extra_lines = []
        # pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
        extra_repr = module.extra_repr()
        printed_stats = print_statistics(name)
        # empty string will be split into list ['']
        if extra_repr:
            extra_lines.extend(extra_repr.split("\n"))
        if printed_stats:
            extra_lines.extend(printed_stats.split("\n"))
        child_lines = []
        for key, submod in module._modules.items():  # pyre-ignore[16]
            submod_name = name + ("." if name else "") + key
            submod_str = repr_with_statistics(submod, submod_name)
            submod_str = _addindent(submod_str, 2)
            child_lines.append("(" + key + "): " + submod_str)
        lines = extra_lines + child_lines

        # pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
        main_str = module._get_name() + "("
        if lines:
            # simple one-liner info, which most builtin Modules will use
            if len(extra_lines) == 1 and not child_lines:
                main_str += extra_lines[0]
            else:
                main_str += "\n  " + "\n  ".join(lines) + "\n"

        main_str += ")"
        return main_str
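
Both repr_with_statistics variants above rely on two names not shown in the snippet: _addindent (PyTorch ships a private helper of this name in torch.nn.modules.module) and print_statistics, a callback mapping a module's qualified name to a statistics string. A minimal wiring sketch, with print_statistics as a stand-in that reads precomputed values (the stats dict is illustrative):

import torch.nn as nn
from torch.nn.modules.module import _addindent  # private helper, present in PyTorch

# Illustrative stand-in; a real implementation would compute these numbers.
stats = {"": "#params: 6", "fc": "#params: 6"}

def print_statistics(name: str) -> str:
    return stats.get(name, "")

model = nn.Sequential()
model.add_module("fc", nn.Linear(2, 2))
# repr_with_statistics as defined above, called on the unnamed root module:
print(repr_with_statistics(model, ""))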
Example #5
    def __init__(self,
                 model: nn.Module,
                 inspect_layers=(nn.Linear, nn.Conv2d),
                 dumps_dir=DUMPS_DIR):
        self.hooks = []
        self.layer_to_name = {}
        self.inspect_layers = inspect_layers
        self.dumps_dir = Path(dumps_dir) / model._get_name()
        shutil.rmtree(self.dumps_dir, ignore_errors=True)
        self.dumps_dir.mkdir(parents=True)
        self.register_hooks(model)
        print(f"Dumping activations from {self.layer_to_name.values()} layers "
              f"to {self.dumps_dir}.")