Example #1
import torch.nn as nn

# Conv2dQ, LinearQ, ActQ, PACT, and Qmodes are this repo's own quantization
# modules; the exact import path depends on the project layout.

class AlexNetQFI(nn.Module):
 def __init__(self,
              num_classes=1000,
              nbits_w=4,
              nbits_a=4,
              q_mode=Qmodes.kernel_wise):
     super(AlexNetQFI, self).__init__()
     self.nbits_w = nbits_w
     self.nbits_a = nbits_a
     self.q_mode = q_mode
     self.features = nn.Sequential(
         ActQ(nbits=-1 if max(nbits_a, nbits_w) <= 0 else 8, signed=True),  # signed 8-bit input quantization
         Conv2dQ(3,
                 64,
                 kernel_size=11,
                 stride=4,
                 padding=2,
                 nbits=nbits_w,
                 mode=q_mode),  # conv1
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a),
         Conv2dQ(64,
                 192,
                 kernel_size=5,
                 padding=2,
                 nbits=nbits_w,
                 mode=q_mode),  # conv2
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a),
         Conv2dQ(192,
                 384,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode),  # conv3
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a),
         Conv2dQ(384,
                 256,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode),  # conv4
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a),
         Conv2dQ(256,
                 256,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode),  # conv5
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a),
     )
     self.classifier = nn.Sequential(
         # nn.Dropout(),
         LinearQ(256 * 6 * 6, 4096, nbits=nbits_w),  # fc6
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a),
         # nn.Dropout(),
         LinearQ(4096, 4096, nbits=nbits_w),  # fc7
         nn.ReLU(inplace=True),
         ActQ(nbits=-1 if max(nbits_a, nbits_w) <= 0 else 8, expand=True),  # expand=True matches expand_fc's 4096 * 2 input width
     )
     # self.shared_fc = LinearQ(4096, num_classes, nbits=nbits_w)
     # self.last_add = EltwiseAdd(inplace=True)
     self.expand_fc = LinearQ(4096 * 2, num_classes, nbits=nbits_w)  # fc8
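A minimal usage sketch for this variant (hypothetical: it assumes AlexNetQFI and the quantized modules above are importable, and that the class defines the usual AlexNet forward pass, which is not shown in this excerpt):

import torch

model = AlexNetQFI(num_classes=1000, nbits_w=4, nbits_a=4)
x = torch.randn(1, 3, 224, 224)  # standard AlexNet input resolution
logits = model(x)                # expected shape: (1, 1000)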
Example #2
class AlexNetQPACT(nn.Module):
 def __init__(self,
              num_classes=1000,
              nbits_w=4,
              nbits_a=4,
              q_mode=Qmodes.kernel_wise,
              l2=True):
     super(AlexNetQPACT, self).__init__()
     self.features = nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),  # conv1
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         PACT(nbits=nbits_a),
         Conv2dQ(64,
                 192,
                 kernel_size=5,
                 padding=2,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv2
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         PACT(nbits=nbits_a),
         Conv2dQ(192,
                 384,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv3
         nn.ReLU(inplace=True),
         PACT(nbits=nbits_a),
         Conv2dQ(384,
                 256,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv4
         nn.ReLU(inplace=True),
         PACT(nbits=nbits_a),
         Conv2dQ(256,
                 256,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv5
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         PACT(nbits=nbits_a),
     )
     self.classifier = nn.Sequential(
         nn.Dropout(),
         # As the experiments show, there is no difference between layer-wise and kernel-wise quantization here.
         LinearQ(256 * 6 * 6,
                 4096,
                 nbits=nbits_w,
                 mode=Qmodes.layer_wise,
                 l2=l2),  # fc6
         nn.ReLU(inplace=True),
         PACT(nbits=nbits_a),
         nn.Dropout(),
         LinearQ(4096, 4096, nbits=nbits_w, mode=Qmodes.layer_wise,
                 l2=l2),  # fc7
         nn.ReLU(inplace=True),
         nn.Linear(4096, num_classes),  # fc8
     )
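Note that this variant keeps conv1 and fc8 in full precision and quantizes activations with PACT rather than ActQ. For reference, PACT (Choi et al., 2018) clips activations at a learnable upper bound alpha and then quantizes linearly; a minimal sketch of that idea (not this repo's PACT implementation) is:

import torch
import torch.nn as nn

class PACTSketch(nn.Module):
    """Minimal sketch of PACT activation quantization: clip at a learnable
    upper bound alpha, then quantize to nbits with a straight-through
    estimator. Not the PACT module used in the examples above."""
    def __init__(self, nbits=4, alpha_init=10.0):
        super().__init__()
        self.alpha = nn.Parameter(torch.tensor(alpha_init))
        self.levels = 2 ** nbits - 1

    def forward(self, x):
        # clip(x, 0, alpha), written so gradients also flow to alpha
        y = 0.5 * (x.abs() - (x - self.alpha).abs() + self.alpha)
        scale = self.alpha / self.levels
        y_q = torch.round(y / scale) * scale
        return y + (y_q - y).detach()  # straight-through estimator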
Example #3
class AlexNetQFN(nn.Module):
 def __init__(self,
              num_classes=1000,
              nbits_w=4,
              nbits_a=4,
              q_mode=Qmodes.kernel_wise,
              l2=True):
     super(AlexNetQFN, self).__init__()
     self.features = nn.Sequential(
         ActQ(nbits=-1 if max(nbits_a, nbits_w) <= 0 else 8,
              signed=True,
              l2=l2),
         Conv2dQ(3,
                 64,
                 kernel_size=11,
                 stride=4,
                 padding=2,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv1
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a, l2=l2),
         Conv2dQ(64,
                 192,
                 kernel_size=5,
                 padding=2,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv2
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a, l2=l2),
         Conv2dQ(192,
                 384,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv3
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a, l2=l2),
         Conv2dQ(384,
                 256,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv4
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a, l2=l2),
         Conv2dQ(256,
                 256,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv5
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a, l2=l2),
     )
     self.classifier = nn.Sequential(
         # nn.Dropout(),
         LinearQ(256 * 6 * 6,
                 4096,
                 nbits=nbits_w,
                 mode=Qmodes.layer_wise,
                 l2=l2),  # fc6
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a, l2=l2),
         # nn.Dropout(),
         LinearQ(4096, 4096, nbits=nbits_w, mode=Qmodes.layer_wise,
                 l2=l2),  # fc7
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a, l2=l2),  # key layer
         LinearQ(4096,
                 num_classes,
                 nbits=nbits_w,
                 mode=Qmodes.layer_wise,
                 l2=l2),  # fc8
     )
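Unlike AlexNetQPACT above, this variant quantizes every layer, including the network input (the signed 8-bit ActQ at the top of features) and fc8 (a LinearQ rather than nn.Linear). A hypothetical usage sketch sweeping bit widths:

# Hypothetical usage: sweep bit widths on the fully quantized variant.
for nbits in (2, 3, 4, 8):
    model = AlexNetQFN(num_classes=1000, nbits_w=nbits, nbits_a=nbits)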
Example #4
def convq3x3(in_planes, out_planes, stride=1, nbits_w=4, q_mode=Qmodes.kernel_wise, l2=True):
    """3x3 convolution with padding"""
    return Conv2dQ(in_planes, out_planes, kernel_size=3, stride=stride,
                   padding=1, bias=False, nbits=nbits_w, mode=q_mode, l2=l2)
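Usage mirrors torchvision's conv3x3 helper; for example:

# Hypothetical usage: a 4-bit, kernel-wise quantized 3x3 conv for a downsampling block.
conv = convq3x3(in_planes=64, out_planes=128, stride=2)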