Example #1
 def __init__(self,
              num_classes=1000,
              width_mult=1.0,
              kw=4,
              ka=4,
              fp_layers=None,
              align_zero=True,
              use_channel_quant=False,
              use_ckpt=False,
              use_multi_domain=False):
     # Initialize both bases explicitly (no super()): IDQ receives
     # MobileNetV2.forward so it can wrap the float forward pass.
     MobileNetV2.__init__(self, num_classes, width_mult)
     IDQ.__init__(self, MobileNetV2.forward, kw, ka, fp_layers, align_zero,
                  use_channel_quant, use_ckpt, use_multi_domain)
Example #2
 def __init__(self, class_num=26):
     super(DigitsMobilenet, self).__init__()
     self.net = nn.Sequential(
         MobileNetV2(num_classes=class_num).features,
         nn.AdaptiveAvgPool2d((1, 1)))
     # 21 parallel classification heads (fc1 .. fc21), one per output
     # position; setattr keeps the original attribute names and
     # state_dict keys while avoiding 21 copy-pasted lines.
     for i in range(1, 22):
         setattr(self, f"fc{i}", nn.Linear(1280, class_num))
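No forward is shown; assuming each head scores one character position of a fixed-length string, a plausible forward (a sketch, not the original implementation) would be:

 def forward(self, x):
     feat = self.net(x).flatten(1)  # (N, 1280)
     # one (N, class_num) logit tensor per character position
     return [getattr(self, f"fc{i}")(feat) for i in range(1, 22)]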
Example #3
def main(width_mult):
    # model = MobileNetV2(1001, width_mult, 32, 1280, 'InvertedResidual', 0.2)
    model = MobileNetV2(width_mult=width_mult)
    print(model)
    flops, params = get_model_infos(model, (2, 3, 224, 224))
    print("FLOPs : {:}".format(flops))
    print("Params : {:}".format(params))
    print("-" * 50)
Example #4
 def __init__(
     self,
     output_dims: List[int],
     blocks: int = 4,
     pretrained_settings: Optional[Dict[str,
                                        Union[str, int, float,
                                              List[Union[int,
                                                         float]]]]] = None,
     pretrained: bool = False,
     progress: bool = False,
     # width_mult: float = 1.0,
     # inverted_residual_setting: Optional[List[List[int]]] = None,
     # round_nearest: int = 8,
     # block: Optional[nn.Module] = None
 ):
     MobileNetV2.__init__(self)
     Encoder.__init__(self, output_dims, pretrained_settings, pretrained,
                      progress)
     # MobileNetV2.__init__(width_mult=width_mult, inverted_residual_setting=inverted_residual_setting,
     # round_nearest=round_nearest, block=block)
     self.blocks = blocks
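Presumably the __init__ of an encoder subclass like class MobileNetV2Encoder(MobileNetV2, Encoder); a hypothetical instantiation (class name and dims are assumed, not from the source):

# output_dims values here are illustrative only.
encoder = MobileNetV2Encoder(output_dims=[16, 24, 32, 96, 1280], blocks=4)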
Example #5
def generate_model(cfg, name):
    pretrained = cfg.model.pretrained
    classes = cfg.model.classes
    if 'dropout' in cfg.model:
        dropout = cfg.model.dropout
    else:
        dropout = 0.2
    # getattr is a safer, more idiomatic lookup than eval()
    model = getattr(models, name)(pretrained=pretrained)
    if classes != 1000:
        in_features = model.classifier[1].in_features
        model.classifier = nn.Sequential(
            nn.Dropout(p=dropout, inplace=True),
            nn.Linear(in_features, classes, bias=False))
    return MobileNetV2(model)
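The cfg.model.dropout / 'dropout' in cfg.model access pattern suggests an OmegaConf-style config; a hypothetical call (values assumed):

from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {"model": {"pretrained": False, "classes": 26, "dropout": 0.3}})
model = generate_model(cfg, "mobilenet_v2")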
Example #6
def mobilenet_v2(pretrained=False, progress=True, imagenet_pretrained=False,
                 num_classes=1, lin_features=512, dropout_prob=0.5,
                 bn_final=False, concat_pool=True, **kwargs):
    r"""MobileNetV2 model from
    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
        num_classes (int, optional): number of output classes
        lin_features (Union[int, List[int]], optional): number of nodes in intermediate layers of model's head
        dropout_prob (float, optional): dropout probability of head FC layers
        bn_final (bool, optional): should a batch norm be added after the last layer
        concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
        **kwargs: optional arguments of :mod:`torchvision.models.mobilenet.MobileNetV2`
    """

    # Model creation
    base_model = MobileNetV2(num_classes=num_classes, **kwargs)
    # Imagenet pretraining
    if imagenet_pretrained:
        if pretrained:
            raise ValueError('imagenet_pretrained cannot be set to True if pretrained=True')
        state_dict = load_state_dict_from_url(imagenet_urls['mobilenet_v2'],
                                              progress=progress)
        # Remove FC params from dict
        for key in ('classifier.1.weight', 'classifier.1.bias'):
            state_dict.pop(key, None)
        missing, unexpected = base_model.load_state_dict(state_dict, strict=False)
        if unexpected or any(not elt.startswith('classifier.') for elt in missing):
            raise KeyError(f"Missing parameters: {missing}\nUnexpected parameters: {unexpected}")

    # Cut at last conv layers
    model = cnn_model(base_model, model_cut, base_model.classifier[1].in_features, num_classes,
                      lin_features, dropout_prob, bn_final=bn_final, concat_pool=concat_pool)

    # Parameter loading
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
                                              progress=progress)
        model.load_state_dict(state_dict)

    return model
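A hedged usage sketch of this factory (argument values are illustrative; the head parameters come from the signature above):

# Binary classifier on ImageNet-pretrained convolutional layers.
model = mobilenet_v2(imagenet_pretrained=True, num_classes=1,
                     lin_features=512, dropout_prob=0.5)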
Example #7
def build_ssd(phase, size=300, num_classes=21):
    model = MobileNetV2()
    weights = torch.load('weights/mobilenet_v2-float.pth')
    model.load_state_dict(weights, strict=False)

    use_final_conv = True
    batch_norm_on_extra_layers = True
    final_feat_dim = 1280

    extras_ = add_extras_bn_group([32, 128, 32, 128, 32, 128, 'P'],
                                  [1, 2, 1, 2, 1, 2, 1],
                                  final_feat_dim,
                                  batch_norm=batch_norm_on_extra_layers)

    # Reuse the extras built above for the head rather than constructing a
    # second, independent copy with its own (untied) parameters.
    return SSD_MobileNetV2_Feat(size, model, extras_, [1, 3, 5, 6], use_final_conv), \
           SSD_MobileNetV2_HEAD(model,
                                extras_,
                                [4, 6, 6, 6, 4, 4],
                                [1, 3, 5, 6],
                                num_classes,
                                phase=phase)
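Hypothetical usage, matching the (feature extractor, head) tuple returned above:

feat_net, head_net = build_ssd('train', size=300, num_classes=21)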
Example #8
 def __init__(self, numClasses=10):
     super(MobileNet_V2, self).__init__()
     # Pass the constructor argument through instead of hard-coding 10.
     self.model = MobileNetV2(num_classes=numClasses)
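A plausible forward for this thin wrapper (assumed, not shown in the source):

 def forward(self, x):
     return self.model(x)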
Example #9
            Inverted_Residual_Block(160, 320, s=1, t=6, alpha=alpha),
            nn.Conv2d(int(320 * alpha), int(alpha * 1280), 1, 1, 0,
                      bias=False),
            nn.BatchNorm2d(int(alpha * 1280)),
            nn.ReLU6(True)
        ]

        feature = nn.Sequential(*layers)
        return feature

    def _build_classifier(self, alpha):
        layers = [
            nn.AdaptiveAvgPool2d(output_size=(1, 1)),
            nn.Flatten(start_dim=1),
            nn.Linear(in_features=int(alpha * 1280), out_features=1000),
        ]

        classifier = nn.Sequential(*layers)
        return classifier


if __name__ == "__main__":
    from torchsummaryM import summary
    from torchvision.models.mobilenet import MobileNetV2

    model = MobileNetV2()
    summary(model, torch.zeros(1, 3, 224, 224))

    model = MobileNet_v2(alpha=1.4)
    summary(model, torch.zeros(1, 3, 224, 224))