def __init__(self):
     super(Net, self).__init__()
     # initialize some fix configurations
     self.fc1_fix_params = _generate_default_fix_cfg(["weight", "bias"],
                                                     method=1,
                                                     bitwidth=BITWIDTH)
     self.bn_fc1_params = _generate_default_fix_cfg(
         ["weight", "bias", "running_mean", "running_var"],
         method=1,
         bitwidth=BITWIDTH,
     )
     self.fc2_fix_params = _generate_default_fix_cfg(["weight", "bias"],
                                                     method=1,
                                                     bitwidth=BITWIDTH)
     self.fix_params = [
         _generate_default_fix_cfg(["activation"],
                                   method=1,
                                   bitwidth=BITWIDTH) for _ in range(4)
     ]
     # initialize modules
     self.fc1 = nnf.Linear_fix(784, 100, nf_fix_params=self.fc1_fix_params)
     # self.bn_fc1 = nnf.BatchNorm1d_fix(100, nf_fix_params=self.bn_fc1_params)
     self.fc2 = nnf.Linear_fix(100, 10, nf_fix_params=self.fc2_fix_params)
     self.fix0 = nnf.Activation_fix(nf_fix_params=self.fix_params[0])
     # self.fix0_bn = nnf.Activation_fix(nf_fix_params=self.fix_params[1])
     self.fix1 = nnf.Activation_fix(nf_fix_params=self.fix_params[2])
     self.fix2 = nnf.Activation_fix(nf_fix_params=self.fix_params[3])
 def _init_thread_local(self):
     if self.activation_fixed_bitwidth:
         import nics_fix_pt.nn_fix as nfp
         self.thread_local = utils.LazyThreadLocal(
             creator_map={
                 "fix":
                 lambda: nfp.Activation_fix(
                     nf_fix_params={
                         "activation": {
                              # auto fix
                              "method": torch.autograd.Variable(
                                  torch.IntTensor(np.array([1])),
                                  requires_grad=False),
                              # not meaningful
                              "scale": torch.autograd.Variable(
                                  torch.IntTensor(np.array([0])),
                                  requires_grad=False),
                              "bitwidth": torch.autograd.Variable(
                                  torch.IntTensor(np.array(
                                      [self.activation_fixed_bitwidth])),
                                  requires_grad=False),
                         }
                     })
             })
     self.thread_lock = threading.Lock()
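
# For reference, a minimal sketch (an assumption, for illustration only) of the
# per-name fix config that _generate_default_fix_cfg is expected to return. It
# mirrors the dict built by hand in _init_thread_local above: one entry per
# quantized quantity, each holding integer tensors for "method", "scale" and
# "bitwidth".
def _sketch_default_fix_cfg(names, method=1, bitwidth=8, scale=0):
    import numpy as np
    import torch
    return {
        name: {
            "method": torch.autograd.Variable(
                torch.IntTensor(np.array([method])), requires_grad=False),
            "scale": torch.autograd.Variable(
                torch.IntTensor(np.array([scale])), requires_grad=False),
            "bitwidth": torch.autograd.Variable(
                torch.IntTensor(np.array([bitwidth])), requires_grad=False),
        }
        for name in names
    }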
Example #3
def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v[0] == "maxpooling":
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        elif v[0] == "dropout":
            layers += [nn.Dropout()]
        elif "fc" in v[0]:
            fc = nnf.Linear_fix(
                v[1],
                v[2],
                nf_fix_params=_generate_default_fix_cfg(["weight", "bias"],
                                                        method=v[3],
                                                        bitwidth=v[4]),
            )
            activation = nnf.Activation_fix(
                nf_fix_params=_generate_default_fix_cfg(
                    ["activation"], method=v[5], bitwidth=v[6]))
            layers += [fc, activation, nn.ReLU(inplace=True)]
        elif "conv" in v[0]:
            conv2d = nnf.Conv2d_fix(
                in_channels,
                v[1],
                kernel_size=3,
                padding=1,
                nf_fix_params=_generate_default_fix_cfg(["weight", "bias"],
                                                        method=v[2],
                                                        bitwidth=v[3]),
            )
            activation = nnf.Activation_fix(
                nf_fix_params=_generate_default_fix_cfg(
                    ["activation"], method=v[4], bitwidth=v[5]))

            if batch_norm:
                layers += [
                    conv2d, activation,
                    nn.BatchNorm2d(v[1]),  # v[1] is the conv output channel count
                    nn.ReLU(inplace=True)
                ]
            else:
                layers += [conv2d, activation, nn.ReLU(inplace=True)]
            in_channels = v[1]

    print(layers)
    return nn.Sequential(*layers)
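
# An illustrative cfg for make_layers above. The tuple layouts are read off the
# parsing code: conv entries are (name, out_channels, weight_method,
# weight_bitwidth, act_method, act_bitwidth); fc entries are (name, in_features,
# out_features, weight_method, weight_bitwidth, act_method, act_bitwidth);
# "maxpooling" and "dropout" entries take no arguments. The concrete values
# below (including the 64 * 16 * 16 fc input size, which assumes a 32x32 input
# after one pooling stage) are only an example.
example_cfg = [
    ("conv1", 64, 1, 8, 1, 8),
    ("conv2", 64, 1, 8, 1, 8),
    ("maxpooling",),
    ("dropout",),
    ("fc1", 64 * 16 * 16, 10, 1, 8, 1, 8),
]
features = make_layers(example_cfg, batch_norm=False)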
Example #4
    def __init__(self, features, classifier=None):
        super(VGG_elegant, self).__init__()

        self.features = features
        self.classifier = classifier
        if self.classifier is None:
            self.classifier = nn.Sequential(
                nn.Dropout(),
                nnf.Linear_fix(
                    512,
                    512,
                    nf_fix_params=_generate_default_fix_cfg(["weight", "bias"],
                                                            method=1,
                                                            bitwidth=BITWIDTH),
                ),
                nnf.Activation_fix(nf_fix_params=_generate_default_fix_cfg(
                    ["activation"], method=1, bitwidth=BITWIDTH)),
                nn.ReLU(True),
                nn.Dropout(),
                nnf.Linear_fix(
                    512,
                    512,
                    nf_fix_params=_generate_default_fix_cfg(["weight", "bias"],
                                                            method=1,
                                                            bitwidth=BITWIDTH),
                ),
                nnf.Activation_fix(nf_fix_params=_generate_default_fix_cfg(
                    ["activation"], method=1, bitwidth=BITWIDTH)),
                nn.ReLU(True),
                nnf.Linear_fix(
                    512,
                    10,
                    nf_fix_params=_generate_default_fix_cfg(["weight", "bias"],
                                                            method=1,
                                                            bitwidth=BITWIDTH),
                ),
                nnf.Activation_fix(nf_fix_params=_generate_default_fix_cfg(
                    ["activation"], method=1, bitwidth=BITWIDTH)),
            )

        # Initialize weights
        for m in self.modules():
            if isinstance(m, nnf.Conv2d_fix):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                m.bias.data.zero_()
Example #5
 def forward(self, x):
     activation = nnf.Activation_fix(
         nf_fix_params=_generate_default_fix_cfg(
             ["activation"], method=1, bitwidth=BITWIDTH))
     x = activation(x)
     x = self.features(x)
     x = x.view(x.size(0), -1)
     x = self.classifier(x)
     return x
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.downsample = downsample
        self.stride = stride

        # initialize some fix configurations
        self.conv1_fix_params = _generate_default_fix_cfg(["weight"],
                                                          method=1,
                                                          bitwidth=BITWIDTH)
        self.conv2_fix_params = _generate_default_fix_cfg(["weight"],
                                                          method=1,
                                                          bitwidth=BITWIDTH)
        '''
        self.bn1_fix_params = _generate_default_fix_cfg(
            ["weight", "bias", "running_mean", "running_var"],
            method=1, bitwidth=BITWIDTH,
        )
        self.bn2_fix_params = _generate_default_fix_cfg(
            ["weight", "bias", "running_mean", "running_var"],
            method=1, bitwidth=BITWIDTH,
        )
        '''
        activation_num = 7 if not downsample else 8
        self.fix_params = [
            _generate_default_fix_cfg(["activation"],
                                      method=1,
                                      bitwidth=BITWIDTH)
            for _ in range(activation_num)
        ]

        # initialize layers with corresponding configurations
        self.conv1 = conv3x3(inplanes, planes, self.conv1_fix_params, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        #self.bn1 = nnf.BatchNorm2d_fix(planes, nf_fix_params=self.bn1_fix_params)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, self.conv2_fix_params)
        self.bn2 = nn.BatchNorm2d(planes)
        #self.bn2 = nnf.BatchNorm2d_fix(planes, nf_fix_params=self.bn2_fix_params)

        # initialize activation fix modules
        for i in range(len(self.fix_params)):
            setattr(self, "fix" + str(i),
                    nnf.Activation_fix(nf_fix_params=self.fix_params[i]))

        # initialize weights
        for m in self.modules():
            if isinstance(m, nnf.Conv2d_fix):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
def test_save_state_dict(tmp_path):
    import os
    import numpy as np
    import torch
    from torch import nn

    import nics_fix_pt as nfp
    from nics_fix_pt import nn_fix as nnf
    from nics_fix_pt.utils import _generate_default_fix_cfg

    class _View(nn.Module):
        def __init__(self):
            super(_View, self).__init__()

        def forward(self, inputs):
            return inputs.view(inputs.shape[0], -1)

    data = torch.tensor(np.random.rand(8, 3, 4, 4).astype(np.float32)).cuda()
    ckpt = os.path.join(tmp_path, "tmp.pt")

    model = nn.Sequential(*[
        nnf.Conv2d_fix(3,
                       10,
                       kernel_size=3,
                       padding=1,
                       nf_fix_params=_generate_default_fix_cfg(
                           ["weight", "bias"],
                           scale=2.**np.random.randint(low=-10, high=10),
                           method=nfp.FIX_FIXED)),
        nnf.Conv2d_fix(10,
                       20,
                       kernel_size=3,
                       padding=1,
                       nf_fix_params=_generate_default_fix_cfg(
                           ["weight", "bias"],
                           scale=2.**np.random.randint(low=-10, high=10),
                           method=nfp.FIX_FIXED)),
        nnf.Activation_fix(nf_fix_params=_generate_default_fix_cfg(
            ["activation"],
            scale=2.**np.random.randint(low=-10, high=10),
            method=nfp.FIX_FIXED)),
        nn.AdaptiveAvgPool2d(1),
        _View(),
        nnf.Linear_fix(20,
                       10,
                       nf_fix_params=_generate_default_fix_cfg(
                           ["weight", "bias"],
                           scale=2.**np.random.randint(low=-10, high=10),
                           method=nfp.FIX_FIXED))
    ])
    model.cuda()
    pre_results = model(data)
    torch.save(model.state_dict(), ckpt)
    model2 = nn.Sequential(*[
        nnf.Conv2d_fix(3,
                       10,
                       kernel_size=3,
                       padding=1,
                       nf_fix_params=_generate_default_fix_cfg(
                           ["weight", "bias"],
                           scale=2.**np.random.randint(low=-10, high=10),
                           method=nfp.FIX_FIXED)),
        nnf.Conv2d_fix(10,
                       20,
                       kernel_size=3,
                       padding=1,
                       nf_fix_params=_generate_default_fix_cfg(
                           ["weight", "bias"],
                           scale=2.**np.random.randint(low=-10, high=10),
                           method=nfp.FIX_FIXED)),
        nnf.Activation_fix(nf_fix_params=_generate_default_fix_cfg(
            ["activation"],
            scale=2.**np.random.randint(low=-10, high=10),
            method=nfp.FIX_FIXED)),
        nn.AdaptiveAvgPool2d(1),
        _View(),
        nnf.Linear_fix(20,
                       10,
                       nf_fix_params=_generate_default_fix_cfg(
                           ["weight", "bias"],
                           scale=2.**np.random.randint(low=-10, high=10),
                           method=nfp.FIX_FIXED))
    ])
    model2.cuda()
    model2.load_state_dict(torch.load(ckpt))
    post_results = model2(data)
    assert ((post_results - pre_results).abs() < 1e-2).all()
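
# This test can be run on its own with pytest; note that it needs a CUDA device,
# since both the random input and the two models are moved to the GPU:
#   pytest -k test_save_state_dict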
    def __init__(self, fix_bn=True, fix_grad=True, bitwidth_data=8, bitwidth_grad=16,
                 range_method=nfp.RangeMethod.RANGE_MAX,
                 grad_range_method=nfp.RangeMethod.RANGE_MAX):
        super(FixNet, self).__init__()

        print("fix bn: {}; fix grad: {}; range method: {}; grad range method: {}".format(
            fix_bn, fix_grad, range_method, grad_range_method
        ))

        # fix configurations (data/grad) for parameters/buffers
        self.fix_param_cfgs = {}
        self.fix_grad_cfgs = {}
        layers = [("conv1_1", 128, 3), ("bn1_1",), ("conv1_2", 128, 3), ("bn1_2",),
                  ("conv1_3", 128, 3), ("bn1_3",), ("conv2_1", 256, 3), ("bn2_1",),
                  ("conv2_2", 256, 3), ("bn2_2",), ("conv2_3", 256, 3), ("bn2_3",),
                  ("conv3_1", 512, 3), ("bn3_1",), ("nin3_2", 256, 1), ("bn3_2",),
                  ("nin3_3", 128, 1), ("bn3_3",), ("fc4", 10)]
        for layer_cfg in layers:
            name = layer_cfg[0]
            if "bn" in name and not fix_bn:
                continue
            # data fix config
            self.fix_param_cfgs[name] = _generate_default_fix_cfg(
                ["weight", "bias", "running_mean", "running_var"] \
                if "bn" in name else ["weight", "bias"],
                method=1, bitwidth=bitwidth_data, range_method=range_method
            )
            if fix_grad:
                # grad fix config
                self.fix_grad_cfgs[name] = _generate_default_fix_cfg(
                    ["weight", "bias"], method=1, bitwidth=bitwidth_grad,
                    range_method=grad_range_method
                )

        # fix configurations for activations
        # data fix config
        self.fix_act_cfgs = [
            _generate_default_fix_cfg(["activation"], method=1, bitwidth=bitwidth_data,
                                      range_method=range_method)
            for _ in range(20)
        ]
        if fix_grad:
            # grad fix config
            self.fix_act_grad_cfgs = [
                _generate_default_fix_cfg(["activation"], method=1, bitwidth=bitwidth_grad,
                                          range_method=grad_range_method)
                for _ in range(20)
            ]

        # construct layers
        cin = 3
        for layer_cfg in layers:
            name = layer_cfg[0]
            if "conv" in name or "nin" in name:
                # convolution layers
                cout, kernel_size = layer_cfg[1:]
                layer = nnf.Conv2d_fix(
                    cin, cout,
                    nf_fix_params=self.fix_param_cfgs[name],
                    nf_fix_params_grad=self.fix_grad_cfgs[name] if fix_grad else None,
                    kernel_size=kernel_size,
                    padding=(kernel_size - 1) // 2 if name != "conv3_1" else 0)
                cin = cout
            elif "bn" in name:
                # bn layers
                if fix_bn:
                    layer = nnf.BatchNorm2d_fix(
                        cin,
                        nf_fix_params=self.fix_param_cfgs[name],
                        nf_fix_params_grad=self.fix_grad_cfgs[name] if fix_grad else None)
                else:
                    layer = nn.BatchNorm2d(cin)
            elif "fc" in name:
                # fully-connected layers
                cout = layer_cfg[1]
                layer = nnf.Linear_fix(
                    cin, cout,
                    nf_fix_params=self.fix_param_cfgs[name],
                    nf_fix_params_grad=self.fix_grad_cfgs[name] if fix_grad else None)
                cin = cout
            # call setattr
            setattr(self, name, layer)

        for i in range(20):
            setattr(self, "fix" + str(i), nnf.Activation_fix(
                nf_fix_params=self.fix_act_cfgs[i],
                nf_fix_params_grad=self.fix_act_grad_cfgs[i] if fix_grad else None))

        self.pool1 = nn.MaxPool2d((2, 2))
        self.pool2 = nn.MaxPool2d((2, 2))
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
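
# A minimal instantiation sketch for the FixNet whose __init__ is shown above
# (it assumes the surrounding FixNet class definition and that nfp is the
# imported nics_fix_pt package, as elsewhere in these examples).
fix_net = FixNet(fix_bn=True, fix_grad=True,
                 bitwidth_data=8, bitwidth_grad=16,
                 range_method=nfp.RangeMethod.RANGE_MAX,
                 grad_range_method=nfp.RangeMethod.RANGE_MAX)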
    def __init__(self, depth, num_classes=10, block_name='BasicBlock'):
        super(ResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (
                depth - 2
            ) % 6 == 0, 'When using BasicBlock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (
                depth - 2
            ) % 9 == 0, 'When using Bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError('block_name should be BasicBlock or Bottleneck')

        # initialize fix configurations
        self.conv1_fix_params = _generate_default_fix_cfg(["weight"],
                                                          method=1,
                                                          bitwidth=BITWIDTH)
        self.conv2_fix_params = _generate_default_fix_cfg(["weight"],
                                                          method=1,
                                                          bitwidth=BITWIDTH)
        '''
        self.bn1_fix_params = _generate_default_fix_cfg(
            ["weight", "bias", "running_mean", "running_var"],
            method=1, bitwidth=BITWIDTH,
        )
        self.bn2_fix_params = _generate_default_fix_cfg(
            ["weight", "bias", "running_mean", "running_var"],
            method=1, bitwidth=BITWIDTH,
        )
        '''
        self.fc_fix_params = _generate_default_fix_cfg(["weight", "bias"],
                                                       method=1,
                                                       bitwidth=BITWIDTH)
        self.fix_params = [
            _generate_default_fix_cfg(["activation"],
                                      method=1,
                                      bitwidth=BITWIDTH) for _ in range(6)
        ]

        self.inplanes = 16
        self.conv1 = nnf.Conv2d_fix(3,
                                    16,
                                    kernel_size=3,
                                    padding=1,
                                    bias=False,
                                    nf_fix_params=self.conv1_fix_params)
        #self.bn1 = nnf.BatchNorm2d_fix(16, nf_fix_params=self.bn1_fix_params)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nnf.Linear_fix(64 * block.expansion,
                                 num_classes,
                                 nf_fix_params=self.fc_fix_params)

        # initialize activation fix modules
        for i in range(len(self.fix_params)):
            setattr(self, "fix" + str(i),
                    nnf.Activation_fix(nf_fix_params=self.fix_params[i]))

        for m in self.modules():
            if isinstance(m, nnf.Conv2d_fix):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
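
# Example construction of the fixed-point ResNet defined above. The depth must
# satisfy the 6n+2 (BasicBlock) or 9n+2 (Bottleneck) constraint checked in
# __init__, so depth=20 with BasicBlock gives n=3 blocks per stage.
resnet20 = ResNet(depth=20, num_classes=10, block_name='BasicBlock')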
Example #10
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.downsample = downsample
        self.stride = stride

        # initialize some fix configurations
        self.conv1_fix_params = _generate_default_fix_cfg(["weight"],
                                                          method=1,
                                                          bitwidth=BITWIDTH)
        self.conv2_fix_params = _generate_default_fix_cfg(["weight"],
                                                          method=1,
                                                          bitwidth=BITWIDTH)
        self.conv3_fix_params = _generate_default_fix_cfg(["weight"],
                                                          method=1,
                                                          bitwidth=BITWIDTH)
        '''
        self.bn1_fix_params = _generate_default_fix_cfg(
            ["weight", "bias", "running_mean", "running_var"],
            method=1, bitwidth=BITWIDTH,
        )
        self.bn2_fix_params = _generate_default_fix_cfg(
            ["weight", "bias", "running_mean", "running_var"],
            method=1, bitwidth=BITWIDTH,
        )
        self.bn3_fix_params = _generate_default_fix_cfg(
            ["weight", "bias", "running_mean", "running_var"],
            method=1, bitwidth=BITWIDTH,
        )
        '''
        activation_num = 10 if not downsample else 11
        self.fix_params = [
            _generate_default_fix_cfg(["activation"],
                                      method=1,
                                      bitwidth=BITWIDTH)
            for _ in range(activation_num)
        ]

        # initialize activation fix modules
        for i in range(len(self.fix_params)):
            setattr(self, "fix" + str(i),
                    nnf.Activation_fix(nf_fix_params=self.fix_params[i]))
        '''
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        '''
        self.conv1 = nnf.Conv2d_fix(inplanes,
                                    planes,
                                    kernel_size=1,
                                    bias=False,
                                    nf_fix_params=self.conv1_fix_params)
        #self.bn1 = nnf.BatchNorm2d_fix(planes, nf_fix_params=self.bn1_fix_params)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nnf.Conv2d_fix(planes,
                                    planes,
                                    kernel_size=3,
                                    stride=stride,
                                    padding=1,
                                    bias=False,
                                    nf_fix_params=self.conv2_fix_params)
        #self.bn2 = nnf.BatchNorm2d_fix(planes, nf_fix_params=self.bn2_fix_params)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nnf.Conv2d_fix(planes,
                                    planes * 4,
                                    kernel_size=1,
                                    bias=False,
                                    nf_fix_params=self.conv3_fix_params)
        #self.bn3 = nnf.BatchNorm2d_fix(planes * 4, nf_fix_params=self.bn3_fix_params)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
Example #11
    def __init__(self):
        super(VGG_ugly, self).__init__()
        # initialize some fix configurations
        self.conv1_fix_params = _generate_default_fix_cfg(
            ["weight", "bias"], method=1, bitwidth=6)  # BITWIDTH)
        self.conv2_fix_params = _generate_default_fix_cfg(
            ["weight", "bias"], method=1, bitwidth=6)  # BITWIDTH)
        self.conv3_1_fix_params = _generate_default_fix_cfg(
            ["weight", "bias"], method=1, bitwidth=6)  # BITWIDTH)
        self.conv3_2_fix_params = _generate_default_fix_cfg(
            ["weight", "bias"], method=1, bitwidth=6)  # BITWIDTH)
        self.conv4_1_fix_params = _generate_default_fix_cfg(
            ["weight", "bias"], method=1, bitwidth=6)  # BITWIDTH)
        self.conv4_2_fix_params = _generate_default_fix_cfg(
            ["weight", "bias"], method=1, bitwidth=6)  # BITWIDTH)
        self.conv5_1_fix_params = _generate_default_fix_cfg(
            ["weight", "bias"], method=1, bitwidth=6)  # BITWIDTH)
        self.conv5_2_fix_params = _generate_default_fix_cfg(
            ["weight", "bias"], method=1, bitwidth=6)  # BITWIDTH)
        self.fc1_fix_params = _generate_default_fix_cfg(["weight", "bias"],
                                                        method=1,
                                                        bitwidth=BITWIDTH)
        self.fc2_fix_params = _generate_default_fix_cfg(["weight", "bias"],
                                                        method=1,
                                                        bitwidth=BITWIDTH)
        self.fc3_fix_params = _generate_default_fix_cfg(["weight", "bias"],
                                                        method=1,
                                                        bitwidth=BITWIDTH)
        self.fix_params = [
            _generate_default_fix_cfg(["activation"],
                                      method=1,
                                      bitwidth=BITWIDTH) for _ in range(12)
        ]
        # initialize modules
        kwargs = {"kernel_size": 3, "padding": 1}
        self.conv1 = nnf.Conv2d_fix(3,
                                    64,
                                    nf_fix_params=self.conv1_fix_params,
                                    **kwargs)
        self.conv2 = nnf.Conv2d_fix(64,
                                    128,
                                    nf_fix_params=self.conv2_fix_params,
                                    **kwargs)
        self.conv3_1 = nnf.Conv2d_fix(128,
                                      256,
                                      nf_fix_params=self.conv3_1_fix_params,
                                      **kwargs)
        self.conv3_2 = nnf.Conv2d_fix(256,
                                      256,
                                      nf_fix_params=self.conv3_2_fix_params,
                                      **kwargs)
        self.conv4_1 = nnf.Conv2d_fix(256,
                                      512,
                                      nf_fix_params=self.conv4_1_fix_params,
                                      **kwargs)
        self.conv4_2 = nnf.Conv2d_fix(512,
                                      512,
                                      nf_fix_params=self.conv4_2_fix_params,
                                      **kwargs)
        self.conv5_1 = nnf.Conv2d_fix(512,
                                      512,
                                      nf_fix_params=self.conv5_1_fix_params,
                                      **kwargs)
        self.conv5_2 = nnf.Conv2d_fix(512,
                                      512,
                                      nf_fix_params=self.conv5_2_fix_params,
                                      **kwargs)
        self.fc1 = nnf.Linear_fix(512, 512, nf_fix_params=self.fc1_fix_params)
        self.fc2 = nnf.Linear_fix(512, 512, nf_fix_params=self.fc2_fix_params)
        self.fc3 = nnf.Linear_fix(512, 10, nf_fix_params=self.fc3_fix_params)
        self.fix0 = nnf.Activation_fix(nf_fix_params=self.fix_params[0])
        self.fix1 = nnf.Activation_fix(nf_fix_params=self.fix_params[1])
        self.fix2 = nnf.Activation_fix(nf_fix_params=self.fix_params[2])
        self.fix3 = nnf.Activation_fix(nf_fix_params=self.fix_params[3])
        self.fix4 = nnf.Activation_fix(nf_fix_params=self.fix_params[4])
        self.fix5 = nnf.Activation_fix(nf_fix_params=self.fix_params[5])
        self.fix6 = nnf.Activation_fix(nf_fix_params=self.fix_params[6])
        self.fix7 = nnf.Activation_fix(nf_fix_params=self.fix_params[7])
        self.fix8 = nnf.Activation_fix(nf_fix_params=self.fix_params[8])
        self.fix9 = nnf.Activation_fix(nf_fix_params=self.fix_params[9])
        self.fix10 = nnf.Activation_fix(nf_fix_params=self.fix_params[10])
        self.fix11 = nnf.Activation_fix(nf_fix_params=self.fix_params[11])

        # Initialize weights
        for m in self.modules():
            if isinstance(m, nnf.Conv2d_fix):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                m.bias.data.zero_()
Example #12
    def __init__(self,
                 bit_width=8,
                 input_fix=False,
                 output_fix=False,
                 conv_weight_fix=[False, False, False, False, False, False],
                 fc_weight_fix=[False, False, False],
                 conv_output_fix=[False, False, False, False, False, False],
                 fc_output_fix=[False, False, False]):
        super(FixOdometryNet, self).__init__()
        self.img_width = 608
        self.img_height = 160
        self.bit_width = bit_width

        self.input_fix = input_fix
        self.conv_weight_fix = conv_weight_fix
        self.conv_output_fix = conv_output_fix
        self.fc_weight_fix = fc_weight_fix
        self.fc_output_fix = fc_output_fix
        self.output_fix = output_fix

        # input
        if self.input_fix:
            self.input_fix_params = _generate_default_fix_cfg(
                ['activation'], method=1, bitwidth=self.bit_width)
            self.fix_input = nnf.Activation_fix(
                nf_fix_params=self.input_fix_params)
        else:
            self.fix_input = lambda x: x

        # initialize modules
        conv_channels = [16, 32, 64, 128, 256, 256]

        # conv1
        if self.conv_weight_fix[0]:
            self.conv1_weight_fix_params = _generate_default_fix_cfg(
                ['weight', 'bias'], method=1, bitwidth=self.bit_width)
            self.conv1 = nnf.Conv2d_fix(6, conv_channels[0], kernel_size=7, padding=3, stride=2,\
                nf_fix_params=self.conv1_weight_fix_params)
        else:
            self.conv1 = nn.Conv2d(6,
                                   conv_channels[0],
                                   kernel_size=7,
                                   padding=3,
                                   stride=2)
        if self.conv_output_fix[0]:
            self.conv1_output_fix_params = _generate_default_fix_cfg(
                ['activation'], method=1, bitwidth=self.bit_width)
            self.fix_conv1 = nnf.Activation_fix(
                nf_fix_params=self.conv1_output_fix_params)
        else:
            self.fix_conv1 = lambda x: x
        self.relu1 = nn.ReLU(inplace=True)

        # conv2
        if self.conv_weight_fix[1]:
            self.conv2_weight_fix_params = _generate_default_fix_cfg(
                ['weight', 'bias'], method=1, bitwidth=self.bit_width)
            self.conv2 = nnf.Conv2d_fix(conv_channels[0], conv_channels[1], kernel_size=5, padding=2, stride=2,\
                nf_fix_params=self.conv2_weight_fix_params)
        else:
            self.conv2 = nn.Conv2d(conv_channels[0],
                                   conv_channels[1],
                                   kernel_size=5,
                                   padding=2,
                                   stride=2)
        if self.conv_output_fix[1]:
            self.conv2_output_fix_params = _generate_default_fix_cfg(
                ['activation'], method=1, bitwidth=self.bit_width)
            self.fix_conv2 = nnf.Activation_fix(
                nf_fix_params=self.conv2_output_fix_params)
        else:
            self.fix_conv2 = lambda x: x
        self.relu2 = nn.ReLU(inplace=True)

        # conv3
        if self.conv_weight_fix[2]:
            self.conv3_weight_fix_params = _generate_default_fix_cfg(
                ['weight', 'bias'], method=1, bitwidth=self.bit_width)
            self.conv3 = nnf.Conv2d_fix(conv_channels[1], conv_channels[2], kernel_size=3, padding=1, stride=2,\
                nf_fix_params=self.conv3_weight_fix_params)
        else:
            self.conv3 = nn.Conv2d(conv_channels[1],
                                   conv_channels[2],
                                   kernel_size=3,
                                   padding=1,
                                   stride=2)
        if self.conv_output_fix[2]:
            self.conv3_output_fix_params = _generate_default_fix_cfg(
                ['activation'], method=1, bitwidth=self.bit_width)
            self.fix_conv3 = nnf.Activation_fix(
                nf_fix_params=self.conv3_output_fix_params)
        else:
            self.fix_conv3 = lambda x: x
        self.relu3 = nn.ReLU(inplace=True)

        # conv4
        if self.conv_weight_fix[3]:
            self.conv4_weight_fix_params = _generate_default_fix_cfg(
                ['weight', 'bias'], method=1, bitwidth=self.bit_width)
            self.conv4 = nnf.Conv2d_fix(conv_channels[2], conv_channels[3], kernel_size=3, padding=1, stride=2,\
                nf_fix_params=self.conv4_weight_fix_params)
        else:
            self.conv4 = nn.Conv2d(conv_channels[2],
                                   conv_channels[3],
                                   kernel_size=3,
                                   padding=1,
                                   stride=2)
        if self.conv_output_fix[3]:
            self.conv4_output_fix_params = _generate_default_fix_cfg(
                ['activation'], method=1, bitwidth=self.bit_width)
            self.fix_conv4 = nnf.Activation_fix(
                nf_fix_params=self.conv4_output_fix_params)
        else:
            self.fix_conv4 = lambda x: x
        self.relu4 = nn.ReLU(inplace=True)

        # conv5
        if self.conv_weight_fix[4]:
            self.conv5_weight_fix_params = _generate_default_fix_cfg(
                ['weight', 'bias'], method=1, bitwidth=self.bit_width)
            self.conv5 = nnf.Conv2d_fix(conv_channels[3], conv_channels[4], kernel_size=3, padding=1, stride=2,\
                nf_fix_params=self.conv5_weight_fix_params)
        else:
            self.conv5 = nn.Conv2d(conv_channels[3],
                                   conv_channels[4],
                                   kernel_size=3,
                                   padding=1,
                                   stride=2)
        if self.conv_output_fix[4]:
            self.conv5_output_fix_params = _generate_default_fix_cfg(
                ['activation'], method=1, bitwidth=self.bit_width)
            self.fix_conv5 = nnf.Activation_fix(
                nf_fix_params=self.conv5_output_fix_params)
        else:
            self.fix_conv5 = lambda x: x
        self.relu5 = nn.ReLU(inplace=True)

        # conv6
        if self.conv_weight_fix[5]:
            self.conv6_weight_fix_params = _generate_default_fix_cfg(
                ['weight', 'bias'], method=1, bitwidth=self.bit_width)
            self.conv6 = nnf.Conv2d_fix(conv_channels[4], conv_channels[5], kernel_size=3, padding=1, stride=2,\
                nf_fix_params=self.conv6_weight_fix_params)
        else:
            self.conv6 = nn.Conv2d(conv_channels[4],
                                   conv_channels[5],
                                   kernel_size=3,
                                   padding=1,
                                   stride=2)
        if self.conv_output_fix[5]:
            self.conv6_output_fix_params = _generate_default_fix_cfg(
                ['activation'], method=1, bitwidth=self.bit_width)
            self.fix_conv6 = nnf.Activation_fix(
                nf_fix_params=self.conv6_output_fix_params)
        else:
            self.fix_conv6 = lambda x: x
        self.relu6 = nn.ReLU(inplace=True)

        # fc1
        if self.fc_weight_fix[0]:
            self.fc1_weight_fix_params = _generate_default_fix_cfg(
                ['weight', 'bias'], method=1, bitwidth=bit_width)
            self.fc1 = nnf.Linear_fix(conv_channels[5] * 3 * 10,
                                      512,
                                      nf_fix_params=self.fc1_weight_fix_params)
        else:
            self.fc1 = nn.Linear(conv_channels[5] * 3 * 10, 512)
        if self.fc_output_fix[0]:
            self.fc1_output_fix_params = _generate_default_fix_cfg(
                ['activation'], method=1, bitwidth=bit_width)
            self.fix_fc1 = nnf.Activation_fix(
                nf_fix_params=self.fc1_output_fix_params)
        else:
            self.fix_fc1 = lambda x: x
        self.relu_fc1 = nn.ReLU(inplace=True)

        # fc2
        if self.fc_weight_fix[1]:
            self.fc2_weight_fix_params = _generate_default_fix_cfg(
                ['weight', 'bias'], method=1, bitwidth=bit_width)
            self.fc2 = nnf.Linear_fix(512,
                                      512,
                                      nf_fix_params=self.fc2_weight_fix_params)
        else:
            self.fc2 = nn.Linear(512, 512)
        if self.fc_output_fix[1]:
            self.fc2_output_fix_params = _generate_default_fix_cfg(
                ['activation'], method=1, bitwidth=bit_width)
            self.fix_fc2 = nnf.Activation_fix(
                nf_fix_params=self.fc2_output_fix_params)
        else:
            self.fix_fc2 = lambda x: x
        self.relu_fc2 = nn.ReLU(inplace=True)

        # fc_pose
        if self.fc_weight_fix[2]:
            self.fc_pose_weight_fix_params = _generate_default_fix_cfg(
                ['weight', 'bias'], method=1, bitwidth=bit_width)
            self.fc_pose = nnf.Linear_fix(
                512, 6, nf_fix_params=self.fc_pose_weight_fix_params)
        else:
            self.fc_pose = nn.Linear(512, 6)
        if self.fc_output_fix[2]:
            self.fc_pose_output_fix_params = _generate_default_fix_cfg(
                ['activation'], method=1, bitwidth=bit_width)
            self.fix_fc_pose = nnf.Activation_fix(
                nf_fix_params=self.fc_pose_output_fix_params)
        else:
            self.fix_fc_pose = lambda x: x
        self.relu_fc_pose = nn.ReLU(inplace=True)

        # output
        if self.output_fix:
            self.output_fix_params = _generate_default_fix_cfg(
                ['activation'], method=1, bitwidth=self.bit_width)
            self.fix_output = nnf.Activation_fix(
                nf_fix_params=self.output_fix_params)
        else:
            self.fix_output = lambda x: x
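
# Illustrative construction of the FixOdometryNet defined above. The six conv
# flags and three fc flags mirror the constructor signature; this particular
# combination (quantize the input, all conv weights, and the pose-head weights)
# is only an example.
odo_net = FixOdometryNet(
    bit_width=8,
    input_fix=True,
    output_fix=False,
    conv_weight_fix=[True] * 6,
    conv_output_fix=[False] * 6,
    fc_weight_fix=[False, False, True],
    fc_output_fix=[False, False, False],
)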
Example #13
    def __init__(self,
                 fix=True,
                 fix_bn=True,
                 bitwidths=[8, 8, None],
                 default_f=None):
        '''
        --- The fixed-point net ---
        Note that here we can only control whether bn is quantized or not.
        If you want to fix only the bn weight & bias but leave the running
        statistics un-fixed, use net.set_fix_method() to achieve that.
        '''
        super(MyNet_fix, self).__init__()

        print("fix bn: {}; Bitwidths: {}".format(fix_bn, bitwidths))
        fix_grad = bitwidths[2] != -1
        # fix configurations (data/grad) for parameters/buffers
        _generate_default_fix_cfg = default_f
        self.fix_param_cfgs = {}
        self.fix_grad_cfgs = {}
        layers = [("conv1_1", 128, 3), ("bn1_1", ), ("conv1_2", 128, 3),
                  ("bn1_2", ), ("conv1_3", 128, 3), ("bn1_3", ),
                  ("conv2_1", 256, 3), ("bn2_1", ), ("conv2_2", 256, 3),
                  ("bn2_2", ), ("conv2_3", 256, 3), ("bn2_3", ),
                  ("conv3_1", 512, 3), ("bn3_1", ), ("nin3_2", 256, 1),
                  ("bn3_2", ), ("nin3_3", 128, 1), ("bn3_3", ), ("fc4", 10)]
        for layer_cfg in layers:
            name = layer_cfg[0]
            if "bn" in name and not fix_bn:
                continue
            # params fix config
            if fix:
                self.fix_param_cfgs[name] = _generate_default_fix_cfg(
                    ["weight", "bias", "running_mean", "running_var"]
                    if "bn" in name else ["weight", "bias"],
                    # ["weight", "bias"] if "bn" in name else ["weight", "bias"],
                    method=1,
                    bitwidth=bitwidths[0])
            else:
                self.fix_param_cfgs[name] = _generate_default_fix_cfg(
                    ["weight", "bias", "running_mean", "running_var"]
                    if "bn" in name else ["weight", "bias"],
                    # ["weight", "bias"] if "bn" in name else ["weight", "bias"],
                    method=0,
                    bitwidth=bitwidths[0])

            if bitwidths[2] != -1:
                # grad fix config
                self.fix_grad_cfgs[name] = _generate_default_fix_cfg(
                    ["weight", "bias"], method=1, bitwidth=bitwidths[2])

        # fix configurations for activations
        if fix:
            self.fix_act_cfgs = [
                _generate_default_fix_cfg(["activation"],
                                          method=1,
                                          bitwidth=bitwidths[1])
                for _ in range(20)
            ]
        else:
            self.fix_act_cfgs = [
                _generate_default_fix_cfg(["activation"],
                                          method=0,
                                          bitwidth=bitwidths[1])
                for _ in range(20)
            ]

        if bitwidths[2] != -1:
            # grad fix config
            self.fix_act_grad_cfgs = [
                _generate_default_fix_cfg(["activation"],
                                          method=1,
                                          bitwidth=bitwidths[2])
                for _ in range(20)
            ]

        # construct layers
        cin = 3
        for layer_cfg in layers:
            name = layer_cfg[0]
            if "conv" in name or "nin" in name:
                # convolution layers
                cout, kernel_size = layer_cfg[1:]
                layer = nnf.Conv2d_fix(
                    cin,
                    cout,
                    nf_fix_params=self.fix_param_cfgs[name],
                    nf_fix_params_grad=self.fix_grad_cfgs[name]
                    if fix_grad else None,
                    kernel_size=kernel_size,
                    padding=(kernel_size - 1) // 2 if name != "conv3_1" else 0)
                cin = cout
            elif "bn" in name:
                # bn layers
                if fix_bn:
                    # layer = nnf.BatchNorm2d_fix(
                    layer = nnf.MyBN_fix(
                        cin,
                        nf_fix_params=self.fix_param_cfgs[name],
                        nf_fix_params_grad=self.fix_grad_cfgs[name]
                        if fix_grad else None)
                else:
                    layer = nn.BatchNorm2d(cin)
            elif "fc" in name:
                # fully-connected layers
                cout = layer_cfg[1]
                layer = nnf.Linear_fix(
                    cin,
                    cout,
                    nf_fix_params=self.fix_param_cfgs[name],
                    nf_fix_params_grad=self.fix_grad_cfgs[name]
                    if fix_grad else None)
                cin = cout
            # call setattr
            setattr(self, name, layer)

        for i in range(20):
            setattr(
                self, "fix" + str(i),
                nnf.Activation_fix(nf_fix_params=self.fix_act_cfgs[i],
                                   nf_fix_params_grad=self.fix_act_grad_cfgs[i]
                                   if fix_grad else None))

        self.pool1 = nn.MaxPool2d((2, 2))
        self.pool2 = nn.MaxPool2d((2, 2))
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
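
# The docstring of MyNet_fix above refers to net.set_fix_method(); a hedged
# usage sketch. The constructor arguments follow the signature shown above, and
# _generate_default_fix_cfg must be passed as default_f because the helper is
# looked up through that argument. The exact signature of set_fix_method is an
# assumption (only its name comes from the docstring); nfp.FIX_FIXED is the
# constant used in the state-dict test earlier.
net = MyNet_fix(fix=True, fix_bn=True, bitwidths=[8, 8, -1],
                default_f=_generate_default_fix_cfg)
# net.set_fix_method(nfp.FIX_FIXED)  # e.g. freeze fix scales for evaluation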