Code Example #1
File: eltops.py Project: fengyuentau/PyTorch-FLOPs
    def forward(self, a: TensorSize, b: TensorSize = None):
        assert isinstance(a, TensorSize), \
            'Type of input must be \'{}\'.'.format(TensorSize.__name__)
        if b is not None:
            assert isinstance(b, TensorSize), \
                'Type of input must be \'{}\'.'.format(TensorSize.__name__)
            assert a.dim == b.dim, 'Dimension of a and b must be equal.'
            assert a.value == b.value, 'Size of {} and {} must be equal.'.format(
                a.value, b.value)

        # Element-wise ops preserve the input shape.
        y = TensorSize(a.value)

        return y
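
All of these examples operate on a TensorSize object whose definition is not included in this listing. A minimal stub consistent with the attributes used here (value, dim, and _tensor_size) is sketched below for experimentation; it is an assumption, not the real flops_counter.tensorsize.TensorSize, which presumably also carries FLOPs bookkeeping:

class TensorSize:
    # Minimal stand-in inferred from usage in these examples (assumption).
    def __init__(self, tensor_size):
        self._tensor_size = list(tensor_size)

    @property
    def value(self):
        # Raw size as a list, e.g. [batch, channels, height, width].
        return self._tensor_size

    @property
    def dim(self):
        # Number of dimensions, e.g. 4 for an NCHW tensor.
        return len(self._tensor_size)

    def __repr__(self):
        return 'TensorSize({})'.format(self._tensor_size)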
Code Example #2
    def forward(self, x: TensorSize):
        assert isinstance(x, TensorSize), \
            'Type of input must be \'{}\'.'.format(TensorSize.__name__)

        y = TensorSize(x._tensor_size)

        return y
Code Example #3
    def forward(self, x: TensorSize):
        assert isinstance(x, TensorSize), \
            'Type of input must be \'{}\'.'.format(TensorSize.__name__)
        if self.num_parameters != 1:
            assert self.num_parameters == x.value[1], \
                'num_parameters must be either 1 or equal to the channels of the input tensor.'

        y = TensorSize(x._tensor_size)

        return y
Code Example #4
    def forward(self, x: TensorSize):
        assert isinstance(x, TensorSize), \
            'Type of input must be \'{}\'.'.format(TensorSize.__name__)
        assert x.value[1] == self.num_features, \
            'The channel of input {:d} does not match with the definition {:d}.'.format(x.value[1], self.num_features)

        if x.dim == 4:
            y = TensorSize(x.value)

            return y
        else:
            raise NotImplementedError('Not implemented yet for \'{:s}\' with dimension {:d} != 4.'.format(TensorSize.__name__, x.dim))
Code Example #5
    def forward(self, x: TensorSize):
        assert isinstance(x, TensorSize), \
            'Type of input must be \'{}\'.'.format(TensorSize.__name__)
        assert x.value[-1] == self.in_features, \
            'last dimension {:d} does not match with in_features {:d}.'.format(x.value[-1], self.in_features)

        y = list(x.value)
        y[-1] = self.out_features
        y = TensorSize(y)

        return y
Code Example #6
File: pooling.py Project: fengyuentau/PyTorch-FLOPs
    def forward(self, x: TensorSize):
        assert isinstance(x, TensorSize), \
            'Type of input must be \'{}\'.'.format(TensorSize.__name__)

        if x.dim == 4:
            bsin, cin, hin, win = x.value
            hout = self._calc_out(hin, 0)
            wout = self._calc_out(win, 1)
            y = TensorSize([bsin, cin, hout, wout])

            return y
        else:
            raise NotImplementedError(
                'Not implemented yet for \'{:s}\' with dimension {:d} != 4.'.
                format(TensorSize.__name__, x.dim))


# class AdaptiveAvgPool2d(Module):
#     __constants__ = ['output_size']
#     def __init__(self, output_size):
#         super(AdaptiveAvgPool2d, self).__init__()
#         self.output_size = _pair(output_size)

#     def extra_repr(self):
#         return 'output_size={output_size}'.format(**self.__dict__)

#     def _calc_out(self, i, idx):
#         return self.output_size[idx]

#     def _calc_flops(self, x, y):
#         raise NotImplementedError

#     def forward(self, x):
#         '''
#         x should be of shape [channels, height, width]
#         '''
#         assert len(x) == 3, 'input size should be 3, which is [channels, height, width].'

#         cin, hin, win = x
#         hout = self._calc_out(hin, 0)
#         wout = self._calc_out(win, 1)
#         y = [cin, hout, wout]

#         # self._calc_flops(x, y)

#         self._input = x
#         self._output = y

#         return y
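
The _calc_out helper used in the pooling forward is not part of this excerpt. A sketch assuming the standard floor formula, with kernel_size, stride, and padding stored as pairs; with kernel 3, stride 2, padding 1 it reproduces the 112 -> 56 MaxPool2d case in Code Example #19:

    # Hypothetical _calc_out (assumption: standard floor formula; the real
    # implementation in pooling.py is not shown in this listing).
    def _calc_out(self, i, idx):
        # out = floor((in + 2 * padding - kernel_size) / stride) + 1
        return (i + 2 * self.padding[idx] - self.kernel_size[idx]) \
            // self.stride[idx] + 1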
Code Example #7
File: pooling.py Project: fengyuentau/PyTorch-FLOPs
    def forward(self, x: TensorSize):
        assert isinstance(x, TensorSize), \
            'Type of input must be \'{}\'.'.format(TensorSize.__name__)

        if x.dim == 4:
            bsin, cin, hin, win = x.value
            hout = self._calc_out(hin, 0)
            wout = self._calc_out(win, 1)
            y = TensorSize([bsin, cin, hout, wout])

            return y
        else:
            raise NotImplementedError(
                'Not implemented yet for \'{:s}\' with dimension {:d} != 4.'.
                format(TensorSize.__name__, x.dim))
Code Example #8
def pad(input, pad, mode='constant', value=0):
    assert isinstance(input, TensorSize), \
        'Type of input must be \'{}\'.'.format(TensorSize.__name__)
    assert len(pad) % 2 == 0, 'Padding length must be divisible by 2'
    assert len(pad) // 2 <= input.dim, 'Padding length too large'

    if len(pad) == 4 and input.dim == 4:
        bsin, cin, hin, win = input.value
        pleft, pright, ptop, pbottom = pad

        bsout = bsin
        cout = cin
        hout = hin + ptop + pbottom
        wout = win + pleft + pright
        y = TensorSize([bsout, cout, hout, wout])
        return y
    else:
        raise NotImplementedError
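
A quick usage sketch for pad, assuming the TensorSize stub from Code Example #1:

y = pad(TensorSize([1, 64, 56, 56]), (1, 1, 2, 2))
print(y.value)  # [1, 64, 60, 58]: height gains ptop + pbottom, width gains pleft + pright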
Code Example #9
    def forward(self, x: TensorSize):
        assert isinstance(x, TensorSize), \
            'Type of input must be \'{}\'.'.format(TensorSize.__name__)
        assert x.value[1] == self.in_channels, \
            'The channel of input {:d} does not match with the definition {:d}'.format(x.value[1], self.in_channels)

        if x.dim == 4:
            bsin, cin, hin, win = x.value
            hout = self._calc_out(hin, 0)
            wout = self._calc_out(win, 1)
            y = TensorSize([bsin, self.out_channels, hout, wout])

            return y
        else:
            raise NotImplementedError(
                'Not implemented yet for \'{:s}\' with dimension {:d} != 4.'.
                format(TensorSize.__name__, x.dim))
Code Example #10
File: utils.py Project: fengyuentau/PyTorch-FLOPs
def cat(tensor_sizes, dim: int):
    assert isinstance(tensor_sizes, (list, tuple)), \
        'tensors must be either list or tuple.'

    _dim = tensor_sizes[0].dim
    for t in tensor_sizes:
        assert _dim == t.dim, 'TensorSize(s) must have the same dimension.'

    assert 0 <= dim < _dim, \
        'Given dim {:d} is out of shape of TensorSize(s) {:d}.'.format(dim, _dim)

    for i in range(_dim):
        if i != dim:
            _s = tensor_sizes[0].value[i]
            for t in tensor_sizes:
                assert _s == t.value[i], 'tensors must be of the same shape.'

    # Copy before accumulating so the first input's size is not mutated.
    y = list(tensor_sizes[0].value)
    for i in range(1, len(tensor_sizes)):
        y[dim] += tensor_sizes[i].value[dim]

    return TensorSize(y)
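
Usage sketch for cat, again assuming the stub from Code Example #1; only the chosen dimension grows:

a = TensorSize([1, 64, 56, 56])
b = TensorSize([1, 32, 56, 56])
print(cat([a, b], dim=1).value)  # [1, 96, 56, 56]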
Code Example #11
File: upsample.py Project: fengyuentau/PyTorch-FLOPs
    def forward(self, x: TensorSize):
        assert isinstance(x, TensorSize), \
            'Type of input must be \'{}\'.'.format(TensorSize.__name__)

        if x.dim == 4:
            bsin, cin, hin, win = x.value
            _out = [bsin, cin, hin, win]
            if self.size is not None:
                _out = [bsin, cin, self.size[0], self.size[1]]
            else:
                _out = [
                    bsin, cin, hin * self.scale_factor[0],
                    win * self.scale_factor[1]
                ]
            y = TensorSize(_out)

            self._input = x
            self._output = y

            return y
        else:
            raise NotImplementedError(
                'Not implemented yet for \'{:s}\' with dimension {:d} != 4.'.
                format(TensorSize.__name__, x.dim))
Code Example #12
from _utils import test_on

import sys
sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize

######
# test on BatchNorm2d
######
bn2d = {
    'layers': [
        nn.BatchNorm2d(64)  # same shape
    ],
    'ins': [TensorSize([1, 64, 112, 112])],
    'out_shape': [TensorSize([1, 64, 112, 112])],
    'out_flops': [4816896]
}

test_on(bn2d)

######
# test on L2Norm2d
######
l2norm2d = {
    'layers': [
        nn.L2Norm2d(256)  # same shape
    ],
    'ins': [TensorSize([1, 256, 56, 56])],
    'out_shape': [TensorSize([1, 256, 56, 56])],
    'out_flops': [2408448]
}

test_on(l2norm2d)
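
As a hedged observation, both listed counts equal a fixed per-element cost times the element count; the counting code itself is not shown in this excerpt:

assert 6 * 64 * 112 * 112 == 4816896   # BatchNorm2d: ~6 FLOPs per element
assert 3 * 256 * 56 * 56 == 2408448    # L2Norm2d: ~3 FLOPs per element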
Code Example #13
from _utils import test_on

import sys
sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize

######
# test on Upsample
######
upsample = {
    'layers': [
        nn.Upsample(scale_factor=2, mode='bilinear')  # double the shape
    ],
    'ins': [TensorSize([1, 1024, 20, 20])],
    'out_shape': [TensorSize([1, 1024, 40, 40])],
    'out_flops': [15974400]
}

test_on(upsample)
Code Example #14
File: test_act.py Project: fengyuentau/PyTorch-FLOPs
from _utils import test_on

import sys
sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize

######
# test on ReLU
######
relu = {
    'layers': [
        nn.ReLU()  # same shape
    ],
    'ins': [TensorSize([1, 64, 112, 112])],
    'out_shape': [TensorSize([1, 64, 112, 112])],
    'out_flops': [1605632]
}

test_on(relu)

######
# test on Sigmoid
######
sigmoid = {
    'layers': [
        nn.Sigmoid()  # same shape
    ],
    'ins': [TensorSize([1, 1, 56, 56])],
    'out_shape': [TensorSize([1, 1, 56, 56])],
    'out_flops': [9408]
}

test_on(sigmoid)
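
The same per-element pattern holds here, assuming that is indeed this project's convention:

assert 2 * 64 * 112 * 112 == 1605632  # ReLU: 2 FLOPs per element
assert 3 * 1 * 56 * 56 == 9408        # Sigmoid: 3 FLOPs per element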
Code Example #15
from _utils import test_on

import sys
sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize

######
# test on Conv2d
######
conv = {
    'layers': [
        nn.Conv2d(3, 64, 7, 2, 3, bias=False),  # half of the shape
        nn.Conv2d(64, 64, 1, 1, 0, bias=False),  # same shape
        nn.Conv2d(64, 64, 3, 1, 1, bias=False)  # same shape
    ],
    'ins': [
        TensorSize([1, 3, 224, 224]),
        TensorSize([1, 64, 56, 56]),
        TensorSize([1, 64, 56, 56])
    ],
    'out_shape': [
        TensorSize([1, 64, 112, 112]),
        TensorSize([1, 64, 56, 56]),
        TensorSize([1, 64, 56, 56])
    ],
    'out_flops': [235225088, 25489408, 231010304]
}

test_on(conv)
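
The listed Conv2d counts are consistent with (2 * Cin * Kh * Kw - 1) operations per output element when bias=False (n multiplications plus n - 1 additions). This is inferred from the numbers, not from the counting code, which is absent from this excerpt:

for cin, k, cout, hout, wout, flops in [
        (3, 7, 64, 112, 112, 235225088),
        (64, 1, 64, 56, 56, 25489408),
        (64, 3, 64, 56, 56, 231010304)]:
    assert (2 * cin * k * k - 1) * cout * hout * wout == flops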

######
# test on ConvTranspose2d
Code Example #16
from _utils import test_on

import sys
sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize

######
# test on EltAdd
######
eltadd = {
    'layers': [
        nn.EltAdd() # same shape
    ],
    'ins': [
        TensorSize([64, 112, 112])
    ],
    'out_shape': [
        TensorSize([64, 112, 112])
    ],
    'out_flops': [
        802816
    ]
}

test_on(eltadd)

######
# test on EltMul
######
eltmul = {
Code Example #17
import sys
sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize

mlist = nn.ModuleList([nn.Conv2d(3, 64, 3, 1, 1)])

ts = TensorSize([1, 3, 224, 224])

# print(mlist[0](ts))
# print(mlist[0].flops)
# print(mlist.flops)


class NN(nn.Module):
    def __init__(self):
        super(NN, self).__init__()

        self.conv = nn.Conv2d(3, 64, 3, 1, 1)
        self.headers = nn.ModuleList(
            [nn.Conv2d(64, 4, 3, 1, 1),
             nn.Conv2d(64, 2, 3, 1, 1)])

    def forward(self, x):
        x = self.conv(x)
        y1 = self.headers[0](x)
        y2 = self.headers[1](x)
        # self.headers.settle(x, y1)
        return y1, y2
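
A hypothetical run of the NN module above, assuming nn.Module.__call__ dispatches to forward as in PyTorch:

net = NN()
y1, y2 = net(TensorSize([1, 3, 224, 224]))
# A 3x3 conv with stride 1 and padding 1 preserves height and width, so:
# y1.value == [1, 4, 224, 224] and y2.value == [1, 2, 224, 224]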

Code Example #18
from _utils import test_on

import sys

sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize

######
# test on Linear
######
linear = {
    'layers': [
        nn.Linear(4096, 8192)  # last dimension 4096 -> 8192
    ],
    'ins': [TensorSize([1, 4096])],
    'out_shape': [TensorSize([1, 8192])],
    'out_flops': [67108864]
}

test_on(linear)
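
The listed count matches one multiply and one add per weight at batch size 1, assuming that is the convention used here:

assert 2 * 4096 * 8192 == 67108864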
Code Example #19
from _utils import test_on

import sys
sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize

######
# test on MaxPool2d
######
mxpool2d = {
    'layers': [
        nn.MaxPool2d(3, 2, 1)  # half of the shape
    ],
    'ins': [
        TensorSize([1, 64, 112, 112])
    ],
    'out_shape': [
        TensorSize([1, 64, 56, 56])
    ],
    'out_flops': [
        602112
    ]
}

test_on(mxpool2d)
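
Shape sanity check with the standard pooling formula (an assumption about this project's _calc_out; see Code Example #6):

hin, k, s, p = 112, 3, 2, 1
assert (hin + 2 * p - k) // s + 1 == 56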