Example #1
0
from _utils import test_on

import sys

sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize

######
# test on ReLU
######
# Elementwise activation: output size equals input size.
relu = {
    'layers': [
        nn.ReLU()  # same shape
    ],
    'ins': [TensorSize([1, 64, 112, 112])],
    'out_shape': [TensorSize([1, 64, 112, 112])],
    'out_flops': [1605632]
}

test_on(relu)

######
# test on Sigmoid
######
# Elementwise activation: output size equals input size.
sigmoid = {
    'layers': [
        nn.Sigmoid()  # same shape
    ],
    'ins': [TensorSize([1, 1, 56, 56])],
    'out_shape': [TensorSize([1, 1, 56, 56])],
    'out_flops': [9408]
}

test_on(sigmoid)
Example #2
0
from _utils import test_on

import sys
sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize

######
# test on Upsample
######
# Bilinear upsampling with scale_factor=2: channels stay at 1024,
# each spatial dimension doubles (20 -> 40).
upsample = dict(
    layers=[nn.Upsample(scale_factor=2, mode='bilinear')],
    ins=[TensorSize([1, 1024, 20, 20])],
    out_shape=[TensorSize([1, 1024, 40, 40])],
    out_flops=[15974400],
)

test_on(upsample)
Example #3
0
from _utils import test_on

import sys

sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize

######
# test on Linear
######
# Fully-connected layer mapping 4096 input features to 8192 outputs.
linear = dict(
    layers=[nn.Linear(4096, 8192)],
    ins=[TensorSize([1, 4096])],
    out_shape=[TensorSize([1, 8192])],
    out_flops=[67108864],
)

test_on(linear)
Example #4
0
from _utils import test_on

import sys
sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize

######
# test on MaxPool2d
######
# 3x3 max pooling, stride 2, padding 1: spatial dims halve (112 -> 56),
# channel count is unchanged.
mxpool2d = dict(
    layers=[nn.MaxPool2d(3, 2, 1)],
    ins=[TensorSize([1, 64, 112, 112])],
    out_shape=[TensorSize([1, 64, 56, 56])],
    out_flops=[602112],
)

test_on(mxpool2d)
Example #5
0
        # NOTE(review): this chunk is truncated — the start of the `conv`
        # dict (its 'layers' key and any earlier Conv2d entries) is missing.
        nn.Conv2d(64, 64, 3, 1, 1, bias=False)  # same shape
    ],
    'ins': [
        # Three input sizes paired with three expected outputs/FLOP counts;
        # presumably the missing layer entries above consume the first two
        # inputs — TODO confirm against the full original file.
        TensorSize([1, 3, 224, 224]),
        TensorSize([1, 64, 56, 56]),
        TensorSize([1, 64, 56, 56])
    ],
    'out_shape': [
        TensorSize([1, 64, 112, 112]),
        TensorSize([1, 64, 56, 56]),
        TensorSize([1, 64, 56, 56])
    ],
    'out_flops': [235225088, 25489408, 231010304]
}

test_on(conv)

######
# test on ConvTranspose2d
######
convtran = {
    'layers': [
        nn.ConvTranspose2d(512, 256, 4, 2,
                           1),  # doubles the spatial dims, except channels
        nn.ConvTranspose2d(1024, 256, 4, 4,
                           0)  # quadruples the spatial dims, except channels
    ],
    'ins': [TensorSize([1, 512, 28, 28]),
            TensorSize([1, 1024, 14, 14])],
    'out_shape': [TensorSize([1, 256, 56, 56]),
                  TensorSize([1, 256, 56, 56])],
    # NOTE(review): truncated here — the 'out_flops' entry, the closing
    # brace, and the test_on(convtran) call are cut off in this chunk.
Example #6
0
# Elementwise addition: output size equals input size.
# NOTE(review): the input here is rank-3 ([64, 112, 112]) unlike the 4-D
# batched sizes used in the other examples — presumably intentional; confirm.
eltadd = dict(
    layers=[nn.EltAdd()],
    ins=[TensorSize([64, 112, 112])],
    out_shape=[TensorSize([64, 112, 112])],
    out_flops=[802816],
)

test_on(eltadd)

######
# test on EltMul
######
# Elementwise multiplication: output size equals input size.
eltmul = {
    'layers': [
        nn.EltMul() # same shape
    ],
    'ins': [
        TensorSize([1, 64, 112, 112])
    ],
    'out_shape': [
        TensorSize([1, 64, 112, 112])
    ],
    # NOTE(review): this chunk is truncated — the 'out_flops' values, the
    # closing brace, and the test_on(eltmul) call are cut off below.
    'out_flops': [
Example #7
0
# NOTE(review): this chunk opened with a bare `sys.path.append('.')` — the
# `import sys` and `from _utils import test_on` lines it relies on were
# evidently cut off; restored here to match the sibling examples.
from _utils import test_on

import sys

sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize

######
# test on BatchNorm2d
######
# Normalization layer: output size equals input size.
bn2d = {
    'layers': [
        nn.BatchNorm2d(64)  # same shape
    ],
    'ins': [TensorSize([1, 64, 112, 112])],
    'out_shape': [TensorSize([1, 64, 112, 112])],
    'out_flops': [4816896]
}

test_on(bn2d)

######
# test on L2Norm2d
######
# Normalization layer: output size equals input size.
l2norm2d = {
    'layers': [
        nn.L2Norm2d(256)  # same shape
    ],
    'ins': [TensorSize([1, 256, 56, 56])],
    'out_shape': [TensorSize([1, 256, 56, 56])],
    'out_flops': [2408448]
}

test_on(l2norm2d)