def __init__(self, out_channels, stage_idxs, model_name, depth=5, **kwargs):
    """Build a MobileNetV3 backbone for use as a feature encoder.

    Resolves the inverted-residual configuration for *model_name*, delegates
    construction to the parent MobileNetV3 class, records encoder metadata,
    and removes the classification head (not needed for feature extraction).
    """
    cfg, last_channel = _mobilenet_v3_conf(model_name, kwargs)
    super().__init__(cfg, last_channel, **kwargs)

    # Encoder bookkeeping consumed by feature-extraction callers.
    self._in_channels = 3
    self._out_channels = out_channels
    self._stage_idxs = stage_idxs
    self._depth = depth

    # Drop the classifier head: only the convolutional stages are used.
    del self.classifier
# Example #2
 def __init__(self,
              in_channels,
              width_mult: float = 1.0,
              reduced_tail: bool = False,
              dilated: bool = False):
     """Construct a MobileNetV3-Large backbone and hand it to the parent class.

     The width multiplier, reduced-tail, and dilation options are forwarded
     to the configuration builder; only the inverted-residual settings (the
     first element of the returned pair) are passed on.
     """
     conf = _mobilenet_v3_conf('mobilenet_v3_large',
                               width_mult=width_mult,
                               reduced_tail=reduced_tail,
                               dilated=dilated)
     # conf[0] is the inverted-residual layer configuration; the remaining
     # element(s) (e.g. last_channel) are not needed here.
     super().__init__(in_channels=in_channels,
                      inverted_residual_setting=conf[0])
# Example #3
def mobilenet_v3_small(pretrained=False, progress=True, quantize=False, **kwargs):
    """
    Constructs a MobileNetV3 Small architecture from
    `"Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>`_.

    Note that quantize = True returns a quantized model with 8 bit
    weights. Quantized models only support inference and run on CPUs.
    GPU inference is not yet supported.

    Args:
     pretrained (bool): If True, returns a model pre-trained on ImageNet.
     progress (bool): If True, displays a progress bar of the download to stderr
     quantize (bool): If True, returns a quantized model, else returns a float model
    """
    arch = "mobilenet_v3_small"
    # Remaining keyword arguments are forwarded (as a dict) to the config
    # builder and then again to the model constructor.
    settings, last_channel = _mobilenet_v3_conf(arch, kwargs)
    return _mobilenet_v3_model(arch, settings, last_channel,
                               pretrained, progress, quantize, **kwargs)