def create_cnn_model(base_arch,
                     nc: int,
                     cut: Optional[Union[int, Callable]] = None,
                     pretrained: bool = True,
                     lin_ftrs: Optional[Collection[int]] = None,
                     ps: Floats = 0.5,
                     custom_head: Optional[nn.Module] = None,
                     bn_final: bool = False,
                     concat_pool: bool = True):
    "Create custom convnet architecture"

    print(type(base_arch), base_arch)
    body = create_body(base_arch, pretrained, cut)
    # body.requires_grad = True

    nf = num_features_model(
        nn.Sequential(*body.children())) * (2 if concat_pool else 1)

    # Honor custom_head and lin_ftrs from the signature (otherwise they are silently ignored)
    head = custom_head or create_head(nf,
                                      nc,
                                      lin_ftrs=lin_ftrs,
                                      ps=ps,
                                      concat_pool=concat_pool,
                                      bn_final=bn_final)

    model = nn.Sequential(body, head)

    return model
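
A minimal usage sketch (not part of the original snippet), assuming fastai v1's models module is importable and using resnet34 with 10 classes as illustrative choices:

from fastai.vision import models

# Build a 10-class classifier on a pretrained ResNet-34 backbone.
model = create_cnn_model(models.resnet34, nc=10, pretrained=True, ps=0.5)
print(model[1])  # inspect the generated head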
Example #2
def create_cnn(data, arch, pretrained=False, is_mono_input=True, **kwargs):
    meta = cnn_config(arch)
    body = create_body(arch, pretrained)

    # Sum the pretrained weights over the in_channels axis to reduce to a single input channel.
    # Suggestion by David Gutman
    # https://forums.fast.ai/t/black-and-white-images-on-vgg16/2479/2
    if is_mono_input:
        first_conv_layer = body[0]
        first_conv_weights = first_conv_layer.state_dict()['weight']
        assert first_conv_weights.size(1) == 3  # RGB channels dim
        summed_weights = torch.sum(first_conv_weights, dim=1, keepdim=True)
        first_conv_layer.weight.data = summed_weights
        first_conv_layer.in_channels = 1
    else:
        # In this case, the input is stereo: keep only the first two
        # channels of the pretrained weights.
        first_conv_layer = body[0]
        first_conv_weights = first_conv_layer.state_dict()['weight']
        assert first_conv_weights.size(1) == 3  # RGB channels dim
        first_conv_layer.weight.data = first_conv_weights[:, :2, :, :]
        first_conv_layer.in_channels = 2

    nf = num_features_model(body) * 2
    head = create_head(nf, data.c, None, 0.5)
    model = nn.Sequential(body, head)
    learn = Learner(data, model, **kwargs)
    learn.split(meta['split'])
    if pretrained:
        learn.freeze()
    apply_init(model[1], nn.init.kaiming_normal_)
    return learn
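
A standalone sketch of the channel-reduction trick described in the comments above (illustrative only; it assumes torchvision's resnet18 as the pretrained backbone, and the pretrained flag may be deprecated in newer torchvision releases): summing the pretrained 3-channel weights of the first convolution lets the network accept single-channel input.

import torch
import torchvision

m = torchvision.models.resnet18(pretrained=True)
conv1 = m.conv1                                  # Conv2d(3, 64, kernel_size=7, ...)
w = conv1.weight.data                            # shape (64, 3, 7, 7)
conv1.weight.data = w.sum(dim=1, keepdim=True)   # shape (64, 1, 7, 7)
conv1.in_channels = 1
x = torch.randn(1, 1, 224, 224)                  # one mono "image"
print(m.conv1(x).shape)                          # torch.Size([1, 64, 112, 112])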
Example #3
    def __init__(self,
                 base_arch,
                 no_diseases,
                 dropout=0.5,
                 init=nn.init.kaiming_normal_):
        super(CNNPretrainedModel, self).__init__()

        self.body = create_body(base_arch, pretrained=True)
        nf = num_features_model(nn.Sequential(*self.body.children())) * 2

        self.disease_head = create_head(nf,
                                        no_diseases,
                                        ps=0.5,
                                        concat_pool=True,
                                        bn_final=False)
        #self.age_head = create_head(nf, 1, ps=0.5, concat_pool=True, bn_final=False)
        self.gender_head = create_head(nf,
                                       2,
                                       ps=0.5,
                                       concat_pool=True,
                                       bn_final=False)
        #self.projection_head = create_head(nf, 3, ps=0.5, concat_pool=True, bn_final=False)

        self.disease_model = nn.Sequential(self.body, self.disease_head)

        self.meta = cnn_config(base_arch)
        self.split(self.meta['split'])
        self.freeze()

        apply_init(self.disease_head, init)
        #apply_init(self.age_head, init)
        apply_init(self.gender_head, init)
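
The class above shares one backbone between two task heads, but its forward pass is not shown; a hypothetical sketch (names match the attributes defined above) could run the body once and feed its feature maps to both heads, which works because create_head starts with its own pooling layer:

    def forward(self, x):
        # Hypothetical: run the shared backbone once, then each task head.
        features = self.body(x)
        return self.disease_head(features), self.gender_head(features)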
Example #4
    def __init__(self, config):
        super().__init__()
        self.n_emb = config.model.n_emb
        self.radius = config.model.radius
        self.n_class = config.model.n_class
        self.body = get_body(config)
        nf = num_features_model(nn.Sequential(*self.body.children())) * 2
        self.head = create_head(nf,
                                self.n_emb,
                                lin_ftrs=[1024],
                                ps=config.model.drop_rate,
                                concat_pool=True,
                                bn_final=True)
        self.cos_sim = CosSimCenters(self.n_emb, self.n_class)
Example #5
    def __init__(self,
                 encoder: nn.Module,
                 n_classes,
                 final_bias: float = 0.,
                 n_conv: int = 4,
                 chs=256,
                 n_anchors=9,
                 flatten=True,
                 sizes=None):
        super().__init__(encoder, n_classes, final_bias, n_conv, chs,
                         n_anchors, flatten, sizes)

        self.classifier_image = self._create_image_classifier(
            nf=num_features_model(self.encoder) * 2,
            nc=1,
            y_range=[0 - 0.5, n_classes - 0.5])

        self.box_reg_classifier = self._head_box_reg_subnet(
            n_classes=1,
            n_anchors=n_anchors,
            n_conv=n_conv,
            chs=chs,
            y_range=[0 - 0.5, n_classes - 0.5])
Example #6
def create_cnn_2(data: DataBunch,
                 arch: Callable,
                 n_classes: int = 2,
                 cut: Optional[Union[int, Callable]] = None,
                 pretrained: bool = True,
                 lin_ftrs: Optional[Collection[int]] = None,
                 ps: Floats = 0.5,
                 custom_head: Optional[nn.Module] = None,
                 split_on: Optional[SplitFuncOrIdxList] = None,
                 classification: bool = True,
                 **kwargs: Any) -> Learner:
    "Build convnet style learners."
    assert classification, 'Regression CNN not implemented yet, bug us on the forums if you want this!'
    meta = {'cut': -2, 'split': _resnet_split_2}
    body = create_body(arch(pretrained), ifnone(cut, meta['cut']))
    nf = num_features_model(body) * 2
    head = custom_head or create_head(nf, n_classes, lin_ftrs, ps)
    model = nn.Sequential(body, head)
    #learner_cls = ifnone(data.learner_type(), ClassificationLearner_2)
    learner_cls = ClassificationLearner_2
    learn = learner_cls(data, model, **kwargs)
    learn.split(ifnone(split_on, meta['split']))
    if pretrained: learn.freeze()
    apply_init(model[1], nn.init.kaiming_normal_)
    return learn
Example #7
    def __init__(self, arch=models.resnet18):
        super().__init__()
        self.cnn = create_body(arch)
        self.head = create_head(num_features_model(self.cnn) * 2, 4)
Example #8
def resnet(classes):
    base_model = models.resnet50(pretrained=True)
    body = nn.Sequential(*list(base_model.children())[:-2])
    nf = num_features_model(body) * 2
    head = create_head(nf, classes, None, ps=0.5, bn_final=False)
    return nn.Sequential(body, head, nn.LogSoftmax(dim=1))
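
A brief usage sketch for the helper above (illustrative; it assumes the pretrained ResNet-50 weights can be downloaded): build a 5-class model and push a dummy batch through it.

import torch

model = resnet(5)
model.eval()                       # inference mode for the BatchNorm/Dropout layers
x = torch.randn(2, 3, 224, 224)    # dummy batch of two RGB images
out = model(x)
print(out.shape)                   # torch.Size([2, 5]), log-probabilities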