Code Example #1
File: ff.py  Project: sskram/nn-toolbox
 def __init__(
         self, in_channels:int, out_features:int, pool_output_size:int,
         hidden_layer_sizes:Sequence=(512,), activation:nn.Module=nn.ReLU,
         normalization=nn.BatchNorm1d, bn_final:bool=False, drop_p=0.5
 ):
     # Pool -> flatten, then one (BN -> Dropout -> Linear -> activation)
     # block per hidden layer, then an optional final BN and the classifier.
     layers = [AdaptiveConcatPool2d(sz=pool_output_size), Flatten()]
     for i in range(len(hidden_layer_sizes)):
         if i == 0:
             # Concat pooling doubles the channel count, hence the factor of 2
             in_features = in_channels * 2 * pool_output_size * pool_output_size
         else:
             in_features = hidden_layer_sizes[i - 1]
         layers.append(normalization(num_features=in_features))
         if drop_p != 0:
             # Half dropout between hidden layers, full drop_p before the output
             layers.append(nn.Dropout(p=drop_p / 2))
         layers.append(nn.Linear(
             in_features=in_features,
             out_features=hidden_layer_sizes[i]
         ))
         layers.append(activation())
     if bn_final:
         layers.append(normalization(num_features=hidden_layer_sizes[-1], momentum=0.001))  # follows fastai
     if drop_p != 0:
         layers.append(nn.Dropout(p=drop_p))
     layers.append(nn.Linear(in_features=hidden_layer_sizes[-1], out_features=out_features))
     super(FeedforwardBlock, self).__init__(*layers)
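Every example on this page assumes fastai's AdaptiveConcatPool2d and Flatten helpers (or a local copy of them). For reference, here is a minimal sketch consistent with fastai's well-known definitions; exact defaults and docstrings vary by version:

import torch
import torch.nn as nn

class AdaptiveConcatPool2d(nn.Module):
    """Concatenate adaptive max pooling and adaptive average pooling
    along the channel axis, doubling the channel count."""
    def __init__(self, sz=1):
        super().__init__()
        self.ap = nn.AdaptiveAvgPool2d(sz)
        self.mp = nn.AdaptiveMaxPool2d(sz)

    def forward(self, x):
        return torch.cat([self.mp(x), self.ap(x)], dim=1)

class Flatten(nn.Module):
    """Flatten every dimension except the batch dimension."""
    def forward(self, x):
        return x.view(x.size(0), -1)

The doubled channel count is why each head below sizes its first BatchNorm1d and Linear at twice the backbone's output channels.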
Code Example #2
 def __init__(self, in_features, NUM_CATEGORIES, NUM_GENERA, NUM_FAMILIES):
     super(Head, self).__init__()
     self.cat_pool = AdaptiveConcatPool2d()
     self.flat = nn.Flatten()
     # self.bn1 = nn.BatchNorm1d(
     #     num_features=2*in_features, eps=1e-05, momentum=0.1, 
     #     affine=True, track_running_stats=True
     # )
     # self.dp1 = nn.Dropout(p=0.5)
     self.linear = nn.Linear(
         in_features=2*in_features, 
         out_features=in_features, bias=True
     )
     self.relu = nn.ReLU()
     # self.bn2 = nn.BatchNorm1d(
     #     num_features=in_features, eps=1e-05, momentum=0.1,
     #     affine=True, track_running_stats=True
     # )
     # self.dp2 = nn.Dropout(p=0.5)
     # Three parallel classification heads sharing the same trunk features
     self.categories_layer = nn.Linear(
         in_features, NUM_CATEGORIES
     )
     self.genera_layer = nn.Linear(
         in_features, NUM_GENERA
     )
     self.families_level = nn.Linear(
         in_features, NUM_FAMILIES
     )
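The forward pass for this head is not shown in the snippet. A plausible sketch (an assumption, not the project's code) would route the shared trunk into the three parallel heads:

 def forward(self, x):
     # x: backbone feature map of shape (N, in_features, H, W)
     x = self.flat(self.cat_pool(x))   # concat pooling -> (N, 2*in_features)
     x = self.relu(self.linear(x))     # shared trunk -> (N, in_features)
     # One output per taxonomy level
     return (self.categories_layer(x),
             self.genera_layer(x),
             self.families_level(x))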
Code Example #3
 def __init__(self, nc):
     super(PoolTileNetList, self).__init__()
     # net = torchvision.models.resnext50_32x4d(pretrained=True)
     net = torchvision.models.resnet34(pretrained=True)
     infeature = net.fc.in_features
     self.net1 = torch.nn.Sequential(*list(net.children())[:-2])  # drop avgpool and fc
     self.head = nn.Sequential(
         AdaptiveConcatPool2d(), Flatten(),
         nn.Linear(infeature * 2, 512), Mish(),
         nn.BatchNorm1d(512), nn.Dropout(0.5),
         nn.Linear(512, nc)
     )
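The matching forward pass is not included; a hedged sketch of how such a model is typically applied:

 def forward(self, x):
     # x: (N, 3, H, W) image batch
     x = self.net1(x)     # ResNet-34 trunk without avgpool/fc -> (N, 512, h, w)
     return self.head(x)  # concat-pool head -> (N, nc) logits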
Code Example #4
 def __init__(self, nc, n, ps=0.5):
     super().__init__()
     layers = [AdaptiveConcatPool2d(), Mish(), Flatten()] + \
         bn_drop_lin(nc*2, 512, True, ps, Mish()) + \
         bn_drop_lin(512, n, True, ps)
     self.fc = nn.Sequential(*layers)
     self._init_weight()
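bn_drop_lin is a fastai v1 helper that returns a BatchNorm1d -> Dropout -> Linear stack (plus an optional activation) as a plain list, which is why this example can concatenate its results before wrapping them in nn.Sequential. A sketch consistent with that signature:

def bn_drop_lin(n_in, n_out, bn=True, p=0.0, actn=None):
    # BatchNorm1d, Dropout, Linear, optional activation -- returned as a list
    layers = [nn.BatchNorm1d(n_in)] if bn else []
    if p != 0:
        layers.append(nn.Dropout(p))
    layers.append(nn.Linear(n_in, n_out))
    if actn is not None:
        layers.append(actn)
    return layers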
Code Example #5
 def __init__(self, arch='resnext50_32x4d_ssl', n=6, pre=True):
     super().__init__()
     m = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', arch)
     self.enc = nn.Sequential(*list(m.children())[:-2])
     nc = list(m.children())[-1].in_features
     self.head = nn.Sequential(
         AdaptiveConcatPool2d(), Flatten(),
         nn.Linear(2 * nc, 512), Mish(),
         nn.BatchNorm1d(512), nn.Dropout(0.5),
         nn.Linear(512, n)
     )
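Several of these heads replace ReLU with Mish. A minimal implementation of the activation (the repositories may use a fused or memory-optimised variant):

import torch
import torch.nn.functional as F
from torch import nn

class Mish(nn.Module):
    """Mish activation: x * tanh(softplus(x)) (Misra, 2019)."""
    def forward(self, x):
        return x * torch.tanh(F.softplus(x))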
Code Example #6
    def __init__(self, encode_size, hidden_size, num_layers, bs, output_size):
        super().__init__()
        self.encode_size = encode_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bs = bs
        self.output_size = output_size

        resnet_model = resnet34(pretrained=True)
        encoder_layers = list(resnet_model.children())[:8] + [
            AdaptiveConcatPool2d(), Flatten()
        ]
        self.encoder = nn.Sequential(*encoder_layers).cuda()
        for param in self.encoder.parameters():
            param.requires_grad = False
        set_trainable(self.encoder, False)  # fastai fit bug

        # 1024 = 512 resnet34 output channels x 2 (avg + max concat pooling)
        self.encoder_linear = nn.Sequential(
            nn.BatchNorm1d(1024), nn.Dropout(p=0.25),
            nn.Linear(1024, 512), nn.ReLU(),
            nn.BatchNorm1d(512), nn.Dropout(p=0.5),
            nn.Linear(512, encode_size)).cuda()
        set_trainable(self.encoder_linear, True)  # fastai fit bug

        self.lstm = nn.LSTM(encode_size, hidden_size, num_layers).cuda()
        self.h, self.c = self.init_hidden()
        set_trainable(self.lstm, True)  # fastai fit bug

        self.linear = nn.Linear(hidden_size, output_size).cuda()
        set_trainable(self.linear, True)  # fastai fit bug

        self.init_weights()
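Neither init_hidden nor init_weights is shown. For init_hidden, a plausible sketch (an assumption, not the project's code) returns zero-initialised states shaped as nn.LSTM expects:

    def init_hidden(self):
        # (num_layers, batch, hidden_size) zeros for hidden and cell state
        h = torch.zeros(self.num_layers, self.bs, self.hidden_size).cuda()
        c = torch.zeros(self.num_layers, self.bs, self.hidden_size).cuda()
        return h, c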
Code Example #7
    def __init__(self):
        super().__init__()

        resnet_model = resnet34(pretrained=True)
        encoder_layers = list(resnet_model.children())[:8] + [
            AdaptiveConcatPool2d(), Flatten()
        ]
        self.encoder = nn.Sequential(*encoder_layers).cuda()
        for param in self.encoder.parameters():
            param.requires_grad = False
        set_trainable(self.encoder, False)  # fastai fit bug

        # 1024 = 512 resnet34 output channels x 2 (avg + max concat pooling)
        self.linear = nn.Sequential(nn.BatchNorm1d(1024), nn.Dropout(p=0.25),
                                    nn.Linear(1024, 512), nn.ReLU(),
                                    nn.BatchNorm1d(512), nn.Dropout(p=0.5),
                                    nn.Linear(512, 14)).cuda()
        apply_init(self.linear, kaiming_normal)
        set_trainable(self.linear, True)  # fastai fit bug
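apply_init is a fastai helper that walks a module tree and applies an initialiser such as kaiming_normal. A hedged sketch of the behaviour (details differ between fastai versions):

def apply_init(m, init_fn):
    # Initialise the weights of Linear/Conv layers, zero their biases,
    # and leave BatchNorm layers at their defaults.
    def init_leaf(layer):
        if isinstance(layer, (nn.Linear, nn.Conv2d)):
            init_fn(layer.weight)
            if layer.bias is not None:
                nn.init.constant_(layer.bias, 0.0)
    m.apply(init_leaf)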
Code Example #8
 def __init__(self, nc):
     super(MyEfficientNet, self).__init__()
     self.net = EfficientNet.from_pretrained('efficientnet-b0')
     infeature = self.net._conv_head.out_channels
     self.head = nn.Sequential(
         AdaptiveConcatPool2d(), Flatten(),
         nn.Linear(infeature * 2, 512), Mish(),
         nn.BatchNorm1d(512), nn.Dropout(0.5),
         nn.Linear(512, nc), MemoryEfficientSwish()
     )
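The forward pass is omitted; a plausible sketch using extract_features from the efficientnet_pytorch package (an assumption, not the project's code):

 def forward(self, x):
     # extract_features returns the final conv feature map,
     # shape (N, self.net._conv_head.out_channels, h, w)
     x = self.net.extract_features(x)
     return self.head(x)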
Code Example #9
    # Check that the IR is well formed
    onnx.checker.check_model(onnx_model)

    # Print a human readable representation of the graph
    #     onnx.helper.printable_graph(onnx_model.graph)

    return onnx_model


class ImageScale(nn.Module):
    def __init__(self):
        super().__init__()
        # Constant 255 tensor used to rescale uint8 pixel values into [0, 1]
        self.denominator = torch.full((3, 299, 299),
                                      255.0,
                                      device=torch.device("cuda"))

    def forward(self, x):
        return torch.div(x, self.denominator).unsqueeze(0)


def make_fastai_be_coreml_compatible(learner):
    # Suggestion for using softmax didn't work for me
    #final_model = [ImageScale()] + (list(learn.model.children())[:-1] + [nn.Softmax()])
    return nn.Sequential(ImageScale(), *learner.model)


if __name__ == "__main__":
    foo = AdaptiveConcatPool2d()
    monkeypatch_fastai_for_onnx()
    bar = AdaptiveConcatPool2d()
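A hypothetical end-to-end usage of these helpers (learner, the 299x299 input size, and the output filename are assumptions):

# Patch fastai modules for export, wrap the model, then export to ONNX.
monkeypatch_fastai_for_onnx()
model = make_fastai_be_coreml_compatible(learner).eval().cuda()
dummy = torch.rand(3, 299, 299, device="cuda")  # matches ImageScale's buffer
torch.onnx.export(model, dummy, "model.onnx")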