def __init__(self, num_classes, aux_logits=True, transform_input=False, num_channels=4):
    super(SEInception3, self).__init__()
    model = Inception3(num_classes=num_classes, aux_logits=aux_logits,
                       transform_input=transform_input)
    model.Mixed_5b.add_module("SELayer", SELayer(192))
    model.Mixed_5c.add_module("SELayer", SELayer(256))
    model.Mixed_5d.add_module("SELayer", SELayer(288))
    model.Mixed_6a.add_module("SELayer", SELayer(288))
    model.Mixed_6b.add_module("SELayer", SELayer(768))
    model.Mixed_6c.add_module("SELayer", SELayer(768))
    model.Mixed_6d.add_module("SELayer", SELayer(768))
    model.Mixed_6e.add_module("SELayer", SELayer(768))
    if aux_logits:
        model.AuxLogits.add_module("SELayer", SELayer(768))
    model.Mixed_7a.add_module("SELayer", SELayer(768))
    model.Mixed_7b.add_module("SELayer", SELayer(1280))
    model.Mixed_7c.add_module("SELayer", SELayer(2048))
    if num_channels == 4:
        # Replace the stem convolution so the network accepts 4-channel input
        # instead of the default 3-channel RGB.
        model.Conv2d_1a_3x3.conv = nn.Conv2d(4, 32, kernel_size=(3, 3),
                                             stride=(2, 2), bias=False)
    self.model = model
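# SELayer is referenced above but not defined in this snippet. Below is a
# minimal squeeze-and-excitation block in the spirit of Hu et al. (2018); the
# reduction ratio of 16 is an assumption, not taken from this repo. Note that
# add_module() only registers the layer on the Inception block; whether it is
# actually applied depends on how that block's forward() is written.
class SELayer(nn.Module):
    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)      # squeeze: global average pooling
        y = self.fc(y).view(b, c, 1, 1)      # excitation: per-channel gates in (0, 1)
        return x * y                         # rescale the input feature map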
def __init__(self, pretrained=True, num_classes=1000, aux_logits=True, transform_input=False):
    Inception3.__init__(self, num_classes=num_classes, aux_logits=aux_logits,
                        transform_input=transform_input)
    if pretrained:
        self.load_state_dict(
            model_zoo.load_url(inception_urls['inception_v3_google']))
    # Disable the auxiliary classifier output after loading the pretrained weights
    # (the torchvision attribute is `aux_logits`; the original `aux_logit` was a typo
    # and had no effect).
    self.aux_logits = False
    # USM filter (presumably an unsharp-mask sharpening layer) over the 3-channel input.
    self.filter = USM(in_channels=3, kernel_size=5, fixed_coeff=True,
                      sigma=1.667, cuda=True, requires_grad=True)
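# The snippet above only shows the constructor. A plausible forward pass,
# assuming the USM module sharpens the input before the standard Inception3
# forward is applied; this method is a sketch, not taken from the repo.
def forward(self, x):
    x = self.filter(x)                   # sharpen the 3-channel input
    return Inception3.forward(self, x)   # then run the usual Inception v3 graph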
def __init__(self, num_class=2, freeze=False, pretrain=True):
    super(inception_v3_pretrain, self).__init__()
    self.num_class = num_class
    self.model = Inception3(aux_logits=True, transform_input=False)
    if pretrain:
        self.model.load_state_dict(model_zoo.load_url(URL))
    self.sigmoid = nn.Sigmoid()
    # freeze the model
    if freeze:
        for param in self.model.parameters():
            param.requires_grad = False
    # Replace both classification heads with single-logit layers for binary
    # (sigmoid) output.
    in_features = self.model.fc.in_features
    in_features_aux = self.model.AuxLogits.fc.in_features
    self.model.fc = nn.Linear(in_features, 1)
    self.model.AuxLogits.fc = nn.Linear(in_features_aux, 1)
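# A sketch of how this wrapper might be called. With aux_logits=True the
# torchvision backbone returns (logits, aux_logits) in training mode, so both
# heads are passed through the sigmoid; this forward() is an assumption, not
# part of the original snippet.
def forward(self, x):
    if self.training and self.model.aux_logits:
        out, aux = self.model(x)
        return self.sigmoid(out), self.sigmoid(aux)
    return self.sigmoid(self.model(x))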
def __init__(self, num_classes, aux_logits=True, transform_input=False):
    super(SEInception3, self).__init__()
    model = Inception3(num_classes=num_classes, aux_logits=aux_logits,
                       transform_input=transform_input)
    model.Mixed_5b.add_module("SELayer", SELayer(192))
    model.Mixed_5c.add_module("SELayer", SELayer(256))
    model.Mixed_5d.add_module("SELayer", SELayer(288))
    model.Mixed_6a.add_module("SELayer", SELayer(288))
    model.Mixed_6b.add_module("SELayer", SELayer(768))
    model.Mixed_6c.add_module("SELayer", SELayer(768))
    model.Mixed_6d.add_module("SELayer", SELayer(768))
    model.Mixed_6e.add_module("SELayer", SELayer(768))
    if aux_logits:
        model.AuxLogits.add_module("SELayer", SELayer(768))
    model.Mixed_7a.add_module("SELayer", SELayer(768))
    model.Mixed_7b.add_module("SELayer", SELayer(1280))
    model.Mixed_7c.add_module("SELayer", SELayer(2048))
    self.model = model
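# Since the modified backbone is stored in self.model, delegating the forward
# pass is presumably all that is required; this is a sketch, not from the repo.
def forward(self, x):
    return self.model(x)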
def load_inception(
    resize: bool = True, normalizer: Optional[Callable] = None
) -> Tuple[nn.Module, torch.device]:
    file_ = os.path.join(INFO_PATH, INCEPTION_V3)
    if not os.path.exists(file_):
        print(">>> Inception model is not found. Download from url ...")
        inception_model = inception_v3(pretrained=True, transform_input=False)
        device = gpu(inception_model)
        torch.save(inception_model.state_dict(), file_)
    else:
        print(f">>> Inception model is found: {file_} ...")
        inception_model = Inception3(transform_input=False)
        device = gpu(inception_model)
        load(model=inception_model, path=INFO_PATH, filename=INCEPTION_V3, device=device)
    model = _Net(arch=inception_model, resize=resize, normalizer=normalizer)
    return model, device
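# Hypothetical usage of the loader above. The output shape depends on what the
# _Net wrapper does with the backbone, so this only illustrates the calling
# convention; with resize=True the batch is presumably upsampled internally to
# the 299x299 input that Inception v3 expects.
model, device = load_inception(resize=True)
model.eval()
with torch.no_grad():
    batch = torch.rand(8, 3, 64, 64, device=device)
    output = model(batch)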
def se_inception_v3(**kwargs):
    return Inception3(**kwargs)
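# Example construction (hypothetical usage, the argument values are
# illustrative). Note that, as written, this factory returns the stock
# torchvision Inception3; pair it with SEInception3 above if the SE-augmented
# variant is wanted.
net = se_inception_v3(num_classes=10, aux_logits=False)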
def Inceptionv3_Net(n_classes, **kwargs):
    if Params.isTrue('UseSqueezeExcitation'):
        return Inception3(n_classes, inception_blocks=WrappedBlocks, **kwargs)
    else:
        return Inception3(n_classes, **kwargs)
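# WrappedBlocks and Params are not defined in this snippet. torchvision's
# Inception3 accepts `inception_blocks` as a sequence of seven block classes
# (conv block, InceptionA..E, InceptionAux), so one plausible construction is
# sketched below. The `_with_se` helper and the lazy channel inference are
# assumptions for illustration only; building the SE layer inside forward()
# means its parameters only exist after the first batch has been seen.
from torchvision.models.inception import (
    BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux)

def _with_se(block_cls):
    class SEWrapped(block_cls):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.se = None  # built lazily once the block's output width is known
        def forward(self, x):
            out = super().forward(x)
            if self.se is None:
                self.se = SELayer(out.shape[1]).to(out.device)
            return self.se(out)
    return SEWrapped

WrappedBlocks = [BasicConv2d] + [
    _with_se(b) for b in (InceptionA, InceptionB, InceptionC, InceptionD, InceptionE)
] + [InceptionAux]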