Example #1
    def __init__(self):
        super(ManualConvLinearQATModel, self).__init__()
        self.qconfig = torch.quantization.get_default_qat_qconfig("qnnpack")
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.conv = torch.nn.Conv2d(3, 1, kernel_size=3).to(dtype=torch.float)
        self.fc1 = torch.nn.Linear(64, 10).to(dtype=torch.float)
        self.fc2 = torch.nn.Linear(10, 10).to(dtype=torch.float)
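The snippets in this list only build the stubs; the matching forward has to route tensors through them, and the model then goes through the eager-mode workflow (newer releases expose the same functions under torch.ao.quantization). A minimal sketch based on Example #1 — the forward body, the 10x10 input assumption, and the fine-tuning loop are illustrative, not part of the original source:

import torch
import torch.nn as nn
from torch.quantization import QuantStub, DeQuantStub

class ManualConvLinearQATModel(nn.Module):
    """Example #1 with a hypothetical forward pass filled in."""

    def __init__(self):
        super().__init__()
        self.qconfig = torch.quantization.get_default_qat_qconfig("qnnpack")
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.conv = nn.Conv2d(3, 1, kernel_size=3)
        self.fc1 = nn.Linear(64, 10)
        self.fc2 = nn.Linear(10, 10)

    def forward(self, x):
        x = self.quant(x)             # float -> quantized at the model boundary
        x = self.conv(x)
        x = x.reshape(x.size(0), -1)  # 1x8x8 = 64 features, assuming 10x10 inputs
        x = self.fc1(x)
        x = self.fc2(x)
        return self.dequant(x)        # back to float before returning

# Eager-mode QAT flow: insert fake-quant observers, fine-tune, then convert.
torch.backends.quantized.engine = "qnnpack"  # match the qconfig's backend
model = ManualConvLinearQATModel().train()
torch.quantization.prepare_qat(model, inplace=True)
# ... run a few fine-tuning epochs here ...
model.eval()
quantized = torch.quantization.convert(model)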
Example #2
    def __init__(self):
        '''
        Init function to define the layers and loss function.
        '''
        super().__init__()

        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #3
    def __init__(self, qengine='fbgemm'):
        super(AnnotatedConvBnReLUModel, self).__init__()
        self.qconfig = torch.quantization.get_default_qconfig(qengine)
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
        self.relu = nn.ReLU(inplace=True)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
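For a conv-bn-relu stack like Example #3, eager-mode post-training quantization normally fuses the three modules before observers are inserted. A sketch of that flow, assuming the class above also defines a forward that runs quant -> conv -> bn -> relu -> dequant, and using random tensors as a stand-in for a real calibration set:

import torch

model = AnnotatedConvBnReLUModel(qengine="fbgemm").eval()

# Fuse conv + bn + relu into one module so they quantize as a single unit.
torch.quantization.fuse_modules(model, [["conv", "bn", "relu"]], inplace=True)

torch.quantization.prepare(model, inplace=True)   # insert observers
with torch.no_grad():
    for _ in range(8):                            # placeholder calibration loop
        model(torch.randn(1, 3, 32, 32))
torch.quantization.convert(model, inplace=True)   # swap in quantized modules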
Example #4
    def __init__(self):
        super(lane_detection_network, self).__init__()
        self.resizing = resize_layer(3, 32)
        # feature extraction
        self.layer1 = hourglass_block(32, 32)
        self.layer2 = hourglass_block(32, 32)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #5
    def __init__(self, qconfig):
        super(TestM, self).__init__()
        self.conv = nn.Conv2d(3, 1, 3).float()
        self.conv.weight.data.fill_(1.0)
        self.conv.bias.data.fill_(0.01)
        self.qconfig = qconfig
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #6
    def __init__(self, config, mix_qkv=False):
        super(CTRLLMHeadModel, self).__init__(config)
        self.transformer = CTRLModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True)

        self.init_weights()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #7
    def __init__(self):
        super(ManualConvLinearQATModel, self).__init__()
        self.qconfig = default_qconfig
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.conv = torch.nn.Conv2d(3, 5, kernel_size=3).to(dtype=torch.float)
        self.fc1 = torch.nn.Linear(320, 10).to(dtype=torch.float)
        self.fc2 = torch.nn.Linear(10, 10).to(dtype=torch.float)
Example #8
    def __init__(self,
                 block,
                 layers,
                 block_inplanes,
                 n_input_channels=3,
                 conv1_t_size=7,
                 conv1_t_stride=1,
                 no_max_pool=False,
                 shortcut_type='B',
                 widen_factor=1.0,
                 quantize=False,
                 n_classes=400):
        super().__init__()
        self.quantize = quantize
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

        block_inplanes = [int(x * widen_factor) for x in block_inplanes]
        self.in_planes = block_inplanes[0]
        self.no_max_pool = no_max_pool
        self.conv1 = nn.Conv3d(n_input_channels,
                               self.in_planes,
                               kernel_size=(conv1_t_size, 7, 7),
                               stride=(conv1_t_stride, 2, 2),
                               padding=(conv1_t_size // 2, 3, 3),
                               bias=False)
        self.bn1 = nn.BatchNorm3d(self.in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, block_inplanes[0], layers[0],
                                       shortcut_type)
        self.layer2 = self._make_layer(block,
                                       block_inplanes[1],
                                       layers[1],
                                       shortcut_type,
                                       stride=2)
        self.layer3 = self._make_layer(block,
                                       block_inplanes[2],
                                       layers[2],
                                       shortcut_type,
                                       stride=2)
        self.layer4 = self._make_layer(block,
                                       block_inplanes[3],
                                       layers[3],
                                       shortcut_type,
                                       stride=2)

        self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.fc = nn.Linear(block_inplanes[3] * block.expansion, n_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
Example #9
    def __init__(self, *args, **kwargs):
        """
        MobileNet V3 main class
        Args:
            Inherits args from floating point MobileNetV3
        """
        super().__init__(*args, **kwargs)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #10
File: models.py Project: intel/lpot
    def __init__(self, cfg, 
                 n_labels,
                 channels=[768, 100, 100, 100, 100, 100, 100, 100, 100],
                 kernel_sizes=[5, 5, 5, 5, 5, 5, 5, 5],
                 n_hidden_dense=100):
        super().__init__()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.conv_quant = QuantStub()
        self.conv_dequant = DeQuantStub()

        self.embed = Embeddings(cfg)
        self.convs = nn.ModuleList([
            nn.Conv1d(n_in, n_out, k, padding=int(k/2))
            for n_in, n_out, k in zip(channels[:-1], channels[1:], kernel_sizes)
        ])
        self.dense = nn.Linear(channels[-1]*len(kernel_sizes), n_hidden_dense)
        self.classifier = nn.Linear(n_hidden_dense, n_labels)
Example #11
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 2, 5, bias=True).to(dtype=torch.float)
        self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float)
        self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float)
        self.conv2 = nn.Conv2d(2, 2, 1, bias=True).to(dtype=torch.float)
        self.bn2 = nn.BatchNorm2d(2).to(dtype=torch.float)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #12
    def __init__(self, **kwargs):
        super(PoseMobileNet, self).__init__()
        # 0. Initialize
        res_block = InvertedResidual
        inverted_residual_setting = [
            # t (expand ratio), c (channels), n (repeats), s (stride)
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

        self.input_channel = 3
        self.output_channel = 32
        self.output_channel = _make_divisible(self.output_channel)
        self.last_channel = 1280
        self.last_channel = _make_divisible(self.last_channel)

        # 1. Mobilenet v2 backbone
        # https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenet.py
        features = [ConvBNReLU(self.input_channel, self.output_channel, stride=2)]
        self.input_channel = self.output_channel
        for t, c, n, s in inverted_residual_setting:
            self.output_channel = _make_divisible(c)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(res_block(self.input_channel, self.output_channel, stride, expand_ratio=t))
                self.input_channel = self.output_channel

        features.append(ConvBNReLU(self.input_channel, self.last_channel, kernel_size=1))
        self.features = nn.Sequential(*features)
        self.output_channel = self.last_channel

        # 2. Conv transpose layers
        self.conv_transpose_layers = self._make_conv_transpose_layer(
            num_layers = 3,
            filters = [128, 128, 128],
            kernals = [4, 4, 4]
        )

        # 3. Final conv layer
        self.final_layer = nn.Conv2d(
            in_channels = self.output_channel,
            out_channels = 21, # 21 keypoints
            kernel_size = 1,
            stride = 1,
            padding = 0
        )

        # 4. Two special layers for quantization
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #13
    def __init__(self,
                 in_chs,
                 se_ratio=0.25,
                 reduced_base_chs=None,
                 act_layer=nn.ReLU,
                 gate_fn=sigmoid,
                 divisor=1):
        super(SqueezeExcite, self).__init__()
        self.gate_fn = gate_fn
        reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio,
                                     divisor)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
        self.act1 = act_layer(inplace=True)
        self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)

        self.quant_conv_reduce = QuantStub()
        self.quant_conv_expand = QuantStub()
        self.dequant = DeQuantStub()
Example #14
    def __init__(self, nclass, backbone, pretrained_base=False,
                 dataset="city", width_multi=1.0, **kwargs):
        super(_MobileNetV2Seg, self).__init__(nclass, backbone, pretrained_base, **kwargs)
        self.width_multi = width_multi
        in_channels = int(320 // 2 * width_multi)
        inter_channels = int(24 * width_multi)
        self.head = _Head(nclass, in_channels, inter_channels, dataset=dataset, width_multi=width_multi, **kwargs)
        self.quant = QuantStub()
        self.dequant1 = DeQuantStub()
        self.dequant2 = DeQuantStub()
Example #15
    def __init__(self, nclass, backbone, pretrained_base=False,
                 dataset='city', crop_scale=1.0, **kwargs):
        super(_MobileNetV3Seg, self).__init__(nclass, backbone, pretrained_base, **kwargs)
        mode = backbone.split('_')[-1]
        in_channels = 960 // 2 if mode == 'large' else 576 // 2
        inter_channels = 40 if mode.startswith('large') else 24
        self.head = _Head(nclass, in_channels, inter_channels, mode=mode, dataset=dataset, **kwargs)
        self.quant = QuantStub()
        self.dequant1 = DeQuantStub()
        self.dequant2 = DeQuantStub()
Example #16
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = SqueezeBertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #17
    def __init__(self, *args, **kwargs):
        """
        MobileNet V2 main class
        Args:
            Inherits args from floating point MobileNetV2
        """
        super(QuantizableMobileNetV2, self).__init__(*args, **kwargs)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
     self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
Example #18
    def __init__(self):
        super(ModelWithFunctionals, self).__init__()
        self.mycat = nnq.FloatFunctional()
        self.myadd = nnq.FloatFunctional()
        self.mymul = nnq.FloatFunctional()
        self.myadd_relu = nnq.FloatFunctional()
        self.my_scalar_add = nnq.FloatFunctional()
        self.my_scalar_mul = nnq.FloatFunctional()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
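FloatFunctional exists because free functions like torch.add and torch.cat cannot carry observers; routing each op through a dedicated FloatFunctional instance lets convert() replace it with a quantized kernel. A sketch of how the wrappers in Example #18 could be used in forward — the op order and scalar constants are made up for illustration:

    def forward(self, x):
        # Hypothetical forward for ModelWithFunctionals above.
        x = self.quant(x)
        z = self.mycat.cat([x, x], dim=1)      # instead of torch.cat
        z = self.myadd.add(z, z)               # instead of z + z
        z = self.mymul.mul(z, z)               # instead of z * z
        z = self.myadd_relu.add_relu(z, z)     # fused add followed by relu
        z = self.my_scalar_add.add_scalar(z, 2.0)
        z = self.my_scalar_mul.mul_scalar(z, 0.5)
        return self.dequant(z)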
Example #19
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """
        MobileNet V2 main class

        Args:
           Inherits args from floating point MobileNetV2
        """
        super(QuantizableMobileNetV2, self).__init__(*args, **kwargs)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #20
    def __init__(self, w, b, m, v):
        super(SimpleQuantizedBatchNormRelu, self).__init__()
        self.bn = torch.nn.BatchNorm3d(4)
        self.relu = torch.nn.ReLU()
        self.bn.weight = torch.nn.Parameter(w)
        self.bn.bias = torch.nn.Parameter(b)
        self.bn.running_mean = m
        self.bn.running_var = v
        self.q = QuantStub()
        self.dq = DeQuantStub()
Example #21
    def __init__(self, channel):
        super().__init__()

        weight = torch.tensor([[1, 2, 1], [2, 4, 2], [1, 2, 1]], dtype=torch.float32)
        weight = weight.view(1, 1, 3, 3)
        weight = weight / weight.sum()
        weight_flip = torch.flip(weight, [2, 3])

        self.register_buffer('weight', weight.repeat(channel, 1, 1, 1))
        self.register_buffer('weight_flip', weight_flip.repeat(channel, 1, 1, 1))
        self.quant = QuantStub()
Example #22
    def __init__(self, config_path, img_size=416):
        super(Darknet, self).__init__()
        self.module_defs = parse_model_config(config_path)
        self.hyperparams, self.module_list = create_modules(self.module_defs)
        self.yolo_layers = [layer[0] for layer in self.module_list if hasattr(layer[0], "metrics")]
        self.img_size = img_size
        self.seen = 0
        self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
        self.add = torch.nn.quantized.FloatFunctional()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #23
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        # self.relu = nn.ReLU(inplace=True)
        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        self.quant = QuantStub()
        self.dequant = DeQuantStub()

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
Example #24
    def __init__(self, parametrization, classes=10, input_shape=None):
        """
        Initializes the network according to the parametrization passed.

        Args
        ----
        parametrization:
            parameters for the network building
        classes:
            number of classes for the final fully connected layer
        input_shape:
            default shape for the input
        channels:
            number of channels of the input: 1 for grayscale, 3 for color

        """
        super(Net, self).__init__()
        self.input_shape = input_shape

        channels = self.input_shape[0]
        self.parametrization = parametrization
        # Convolution blocks
        conv_blocks = []
        for j in range(1, parametrization.get("num_conv_blocks", 1) + 1):
            conv_blocks.append(self.create_conv_block(j, channels))
        self.conv_blocks = nn.Sequential(*conv_blocks)
        # fully connected blocks
        fc = []
        # Main branch
        self.n_size, self.odd_shape = self._get_conv_output(
            self.parametrization.get("batch_size", 4),
            self.input_shape,
            self._forward_features,
        )

        for i in range(1, parametrization.get("num_fc_layers", 1) + 1):
            fc = self.create_fc_block(fc, i, self.odd_shape[0])

        # Final Layer
        self.fc = nn.Sequential(*fc)
        classifier = []
        classifier.append(
            nn.Linear(
                parametrization.get(
                    "fc_weights_layer_"
                    + str(parametrization.get("num_fc_layers", 0)),
                    self.odd_shape[0],
                ),
                classes,
            )
        )
        self.classifier = nn.Sequential(*classifier)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #25
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = SqueezeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #26
    def __init__(self, channel, reduction=4, add_stub=False):
        super(SqueezeExcite, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            Hsigmoid(add_stub=False))
        self.fmul = nn.quantized.FloatFunctional()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.add_stub = add_stub
Example #27
    def __init__(self):
        super(ModelWithFunctionals, self).__init__()
        self.mycat = nnq.FloatFunctional()
        self.myadd = nnq.FloatFunctional()
        self.myadd_relu = nnq.FloatFunctional()
        # Tracing doesn't work yet for c10 ops with scalar inputs
        # https://github.com/pytorch/pytorch/issues/27097
        self.my_scalar_add = nnq.FloatFunctional()
        self.mymul = nnq.FloatFunctional()
        self.my_scalar_mul = nnq.FloatFunctional()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #28
    def __init__(self, num_classes, is_pretrained=False, quantized=False):
        super(DenseNet121, self).__init__()
        self.quantize = quantized
        if self.quantize:
            self.quant = QuantStub()
        self.densenet121 = torchvision.models.densenet121(
            pretrained=is_pretrained)
        self.features = self.densenet121.features
        self.classifier = nn.Sequential(nn.Linear(1024, num_classes),
                                        nn.Sigmoid())
        if quantized:
            self.dequant = DeQuantStub()
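Because Example #28 creates the stubs only when quantized=True, the matching forward has to guard them the same way. A plausible sketch — the relu/pool/flatten steps are assumptions borrowed from the standard torchvision DenseNet forward:

    def forward(self, x):
        # Hypothetical forward for the DenseNet121 wrapper above.
        if self.quantize:
            x = self.quant(x)
        out = self.features(x)
        out = F.relu(out, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1)).flatten(1)
        out = self.classifier(out)
        if self.quantize:
            out = self.dequant(out)
        return out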
Example #29
def replace_forward(module):
    module.quant = QuantStub()
    module.dequant = DeQuantStub()
    raw_forward = module.forward

    def forward(x):
        x = module.quant(x)
        x = raw_forward(x)
        x = module.dequant(x)
        return x

    module.forward = forward
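One plausible way to apply replace_forward from Example #29: wrap an existing float model, then run the standard post-training flow. The stand-in model and calibration data below are assumptions for illustration (note that models using raw tensor arithmetic such as residual additions would additionally need FloatFunctional):

import torch
import torch.nn as nn

class TinyFloatNet(nn.Module):
    """Stand-in float model; real use would pass an existing network."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, kernel_size=3)
        self.relu = nn.ReLU()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(8, 10)

    def forward(self, x):
        x = self.pool(self.relu(self.conv(x)))
        return self.fc(x.flatten(1))

model = TinyFloatNet().eval()
replace_forward(model)  # wrap forward with the quant/dequant stubs

model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
torch.quantization.prepare(model, inplace=True)
with torch.no_grad():
    model(torch.randn(1, 3, 32, 32))  # placeholder calibration pass
torch.quantization.convert(model, inplace=True)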
Example #30
    def __init__(self,
                 num_classes=3,
                 encoder=None):  # use encoder to pass a pretrained encoder
        super(Net, self).__init__()

        if encoder is None:
            self.encoder = Encoder(num_classes)
        else:
            self.encoder = encoder
        self.decoder = Decoder(num_classes)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()