def __init__(self, nc, n, ps=0.5):
     super().__init__()
     layers = [AdaptiveConcatPool2d(), Mish(), Flatten()] + \
         bn_drop_lin(nc*2, 512, True, ps, Mish()) + \
         bn_drop_lin(512, n, True, ps)
     self.fc = nn.Sequential(*layers)
     self._init_weight()
Example #2
 def __init__(self, nc):
     super(PoolTileNetList, self).__init__()
     # net = torchvision.models.resnext50_32x4d(pretrained = True)
     net = torchvision.models.resnet34(pretrained=True)
     infeature = net.fc.in_features
     self.net1 = torch.nn.Sequential(*list(net.children())[:-2])
     self.head = nn.Sequential(
         AdaptiveConcatPool2d(), Flatten(),
         nn.Linear(infeature * 2, 512), Mish(),
         nn.BatchNorm1d(512), nn.Dropout(0.5),
         nn.Linear(512, nc))
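
The snippet stops at __init__; a minimal forward sketch for this backbone-plus-head pattern (assumed, not part of the original) would be:

 def forward(self, x):
     # ResNet body yields a (bs, infeature, h, w) feature map; the pooled
     # head maps it to nc logits.
     return self.head(self.net1(x))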
Example #3
 def __init__(self, arch='resnext50_32x4d_ssl', n=6, pre=True):
     super().__init__()
     m = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', arch)
     self.enc = nn.Sequential(*list(m.children())[:-2])       
     nc = list(m.children())[-1].in_features
     self.head = nn.Sequential(
         AdaptiveConcatPool2d(), Flatten(),
         nn.Linear(2 * nc, 512), Mish(),
         nn.BatchNorm1d(512), nn.Dropout(0.5),
         nn.Linear(512, n))
Example #4
    def __init__(self, encode_size, hidden_size, num_layers, bs, output_size):
        super().__init__()
        self.encode_size = encode_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bs = bs
        self.output_size = output_size

        resnet_model = resnet34(pretrained=True)
        encoder_layers = list(resnet_model.children())[:8] + [
            AdaptiveConcatPool2d(), Flatten()
        ]
        self.encoder = nn.Sequential(*encoder_layers).cuda()
        for param in self.encoder.parameters():
            param.requires_grad = False
        set_trainable(self.encoder, False)  # fastai fit bug

        self.encoder_linear = nn.Sequential(nn.BatchNorm1d(1024),
                                            nn.Dropout(p=0.25),
                                            nn.Linear(1024, 512), nn.ReLU(),
                                            nn.BatchNorm1d(512),
                                            nn.Dropout(p=0.5),
                                            nn.Linear(512,
                                                      encode_size)).cuda()
        set_trainable(self.encoder_linear, True)  # fastai fit bug

        self.lstm = nn.LSTM(encode_size, hidden_size, num_layers).cuda()
        self.h, self.c = self.init_hidden()
        set_trainable(self.lstm, True)  # fastai fit bug

        self.linear = nn.Linear(hidden_size, output_size).cuda()
        set_trainable(self.linear, True)  # fastai fit bug

        self.init_weights()
Example #5
    def __init__(self,
                 emb_szs,
                 n_cont,
                 out_sz,
                 layers,
                 emb_drop=0.,
                 window=24,
                 filters=[1, 2, 3, 4, 5, 6],
                 y_range=None,
                 use_bn=False,
                 ps=None,
                 bn_final=False):
        super().__init__()

        # TODO: Use the filters arg to generate the conv_layers dynamically
        # Wavenet model layers
        self.c1a = conv_layer(window=window // 2, ks=1, dilation=1)
        self.c1b = conv_layer(window=window // 4, ks=1, dilation=2)
        self.c2a = conv_layer(window=window // 2, ks=2, dilation=1)
        self.c2b = conv_layer(window=window // 4, ks=2, dilation=2)
        self.c3a = conv_layer(window=window // 2, ks=3, dilation=1)
        self.c3b = conv_layer(window=window // 4, ks=3, dilation=2)
        self.c4a = conv_layer(window=window // 2, ks=4, dilation=1)
        self.c4b = conv_layer(window=window // 4, ks=4, dilation=2)
        self.c5a = conv_layer(window=window // 2, ks=5, dilation=1)
        self.c5b = conv_layer(window=window // 4, ks=5, dilation=2)
        self.c6a = conv_layer(window=window // 2, ks=6, dilation=1)
        self.c6b = conv_layer(window=window // 4, ks=6, dilation=2)

        num_wave_outputs = (len(filters) * (window // 2)) + (len(filters) *
                                                             (window // 4))

        # Fastai's Mixed Input model
        ps = ifnone(ps, [0] * len(layers))
        ps = listify(ps, layers)
        self.embeds = nn.ModuleList([embedding(ni, nf) for ni, nf in emb_szs])
        self.emb_drop = nn.Dropout(emb_drop)
        self.bn_cont = nn.BatchNorm1d(n_cont)
        n_emb = sum(e.embedding_dim for e in self.embeds)
        self.n_emb, self.n_cont, self.y_range = n_emb, n_cont, y_range
        sizes = self.get_sizes(layers, out_sz)
        actns = [nn.ReLU(inplace=True)] * (len(sizes) - 2) + [None]
        layers = []
        for i, (n_in, n_out, dp, act) in enumerate(
                zip(sizes[:-2], sizes[1:-1], [0.] + ps, actns)):
            layers += bn_drop_lin(n_in,
                                  n_out,
                                  bn=use_bn and i != 0,
                                  p=dp,
                                  actn=act)
        if bn_final: layers.append(nn.BatchNorm1d(sizes[-1]))
        self.layers = nn.Sequential(*layers)

        # Final layer
        self.f = Flatten()
        self.lin = nn.Linear(sizes[-2] + num_wave_outputs, out_sz, bias=False)

        self.sizes = sizes
        self.num_wave_outputs = num_wave_outputs
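
For the TODO above, one hedged way to derive the paired conv layers from filters instead of hard-coding c1a..c6b (assuming conv_layer keeps the keyword signature used here):

        # Sketch: build the a/b conv pairs dynamically from `filters`.
        self.convs_a = nn.ModuleList(
            [conv_layer(window=window // 2, ks=f, dilation=1) for f in filters])
        self.convs_b = nn.ModuleList(
            [conv_layer(window=window // 4, ks=f, dilation=2) for f in filters])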
Example #6
    def __init__(self,
                 emb_szs,
                 n_cont,
                 emb_drop,
                 out_sz,
                 szs,
                 drops,
                 window=24,
                 filters=[1, 2, 3, 4, 5, 6],
                 y_range=None,
                 use_bn=False,
                 is_reg=True,
                 is_multi=False):
        super().__init__()

        # TODO: Use the filters arg to generate the conv_layers dynamically
        # Wavenet model layers
        self.c1a = conv_layer(window=window // 2, ks=1, dilation=1)
        self.c1b = conv_layer(window=window // 4, ks=1, dilation=2)
        self.c2a = conv_layer(window=window // 2, ks=2, dilation=1)
        self.c2b = conv_layer(window=window // 4, ks=2, dilation=2)
        self.c3a = conv_layer(window=window // 2, ks=3, dilation=1)
        self.c3b = conv_layer(window=window // 4, ks=3, dilation=2)
        self.c4a = conv_layer(window=window // 2, ks=4, dilation=1)
        self.c4b = conv_layer(window=window // 4, ks=4, dilation=2)
        self.c5a = conv_layer(window=window // 2, ks=5, dilation=1)
        self.c5b = conv_layer(window=window // 4, ks=5, dilation=2)
        self.c6a = conv_layer(window=window // 2, ks=6, dilation=1)
        self.c6b = conv_layer(window=window // 4, ks=6, dilation=2)

        num_wave_outputs = (len(filters) * (window // 2)) + (len(filters) *
                                                             (window // 4))

        # Fastai's Mixed Input model
        self.embs = nn.ModuleList([nn.Embedding(c, s) for c, s in emb_szs])
        for emb in self.embs:
            emb_init(emb)
        n_emb = sum(e.embedding_dim for e in self.embs)
        self.n_emb, self.n_cont = n_emb, n_cont

        szs = [n_emb + n_cont] + szs
        self.lins = nn.ModuleList(
            [nn.Linear(szs[i], szs[i + 1]) for i in range(len(szs) - 1)])
        self.bns = nn.ModuleList([nn.BatchNorm1d(sz) for sz in szs[1:]])
        for o in self.lins:
            kaiming_normal(o.weight.data)
        self.outp = nn.Linear(szs[-1], out_sz)
        kaiming_normal(self.outp.weight.data)

        self.emb_drop = nn.Dropout(emb_drop)
        self.drops = nn.ModuleList([nn.Dropout(drop) for drop in drops])
        self.bn = nn.BatchNorm1d(n_cont)
        self.use_bn, self.y_range = use_bn, y_range
        self.is_reg = is_reg
        self.is_multi = is_multi

        # Final layer
        self.f = Flatten()
        self.lin = nn.Linear(szs[-1] + num_wave_outputs, out_sz, bias=False)
Example #7
    def make_head(self, block_ix):
        """End of model: Conv2D - AdaptiveAvgPool2d - Dropout - Linear"""
        n_in_filters = self.filters[block_ix + 1]
        n_filters = self.filters[block_ix + 2]

        head = [
            conv_layer(n_in_filters, n_filters, 1),
            nn.AdaptiveAvgPool2d(1)
        ]
        head += [Flatten(), nn.Dropout(0.2), nn.Linear(n_filters, self.c)]
        return head
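
Presumably (not shown here) the caller assembles the returned list into a module:

    # Hypothetical caller inside the same class:
    self.head = nn.Sequential(*self.make_head(block_ix))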
Example #8
def grad_penalty(inp, fake, critic):
    f = Flatten()
    epsilon = np.random.uniform(0, 1)
    # Detach before enabling grad: the interpolate is a non-leaf tensor when
    # `fake` carries a graph, and setting .requires_grad on a non-leaf raises
    # a RuntimeError; detaching also keeps the penalty off the generator.
    interpolated = (epsilon * inp + (1 - epsilon) * fake).detach()
    interpolated.requires_grad_(True)
    crit_interp = critic(interpolated)
    gradients = grad(outputs=crit_interp, inputs=interpolated,
                     grad_outputs=torch.ones_like(crit_interp),
                     create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradients = f(gradients)
    gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)
    return (gradients_norm - 1) ** 2
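
grad_penalty returns one value per sample; in a WGAN-GP critic step it is typically reduced and scaled (the weight of 10 is the conventional choice, not taken from this snippet):

lam = 10.0  # conventional WGAN-GP penalty weight (illustrative)
critic_loss = (critic(fake).mean() - critic(inp).mean()
               + lam * grad_penalty(inp, fake, critic).mean())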
Example #9
 def __init__(self, n_grps, N, k=1, drop=0.3, first_width=16):
     super().__init__()
     layers = [nn.Conv2d(1, first_width, kernel_size=3, padding=1, bias=False)]
     # Double feature depth at each group, after the first
     widths = [first_width]
     for grp in range(n_grps):
         widths.append(first_width*(2**grp)*k)
     for grp in range(n_grps):
         layers += self._make_group(N, widths[grp], widths[grp+1],
                                    (1 if grp == 0 else 2), drop)
     layers += [nn.BatchNorm2d(widths[-1]), nn.ReLU(inplace=True),
                nn.AdaptiveAvgPool2d(1), Flatten(),
                nn.Linear(widths[-1], 10)]
     self.features = nn.Sequential(*layers)
Example #10
    def __init__(self):
        super().__init__()

        resnet_model = resnet34(pretrained=True)
        encoder_layers = list(resnet_model.children())[:8] + [
            AdaptiveConcatPool2d(), Flatten()
        ]
        self.encoder = nn.Sequential(*encoder_layers).cuda()
        for param in self.encoder.parameters():
            param.requires_grad = False
        set_trainable(self.encoder, False)  # fastai fit bug

        self.linear = nn.Sequential(nn.BatchNorm1d(1024), nn.Dropout(p=0.25),
                                    nn.Linear(1024, 512), nn.ReLU(),
                                    nn.BatchNorm1d(512), nn.Dropout(p=0.5),
                                    nn.Linear(512, 14)).cuda()
        apply_init(self.linear, kaiming_normal)
        set_trainable(self.linear, True)  # fastai fit bug
Example #11
def get_learner(dls):
    model = torch.nn.Sequential(ConvLayer(3, 24, stride=2),
                                ConvLayer(24, 32, stride=2),
                                ConvLayer(32, 64, stride=2),
                                ConvLayer(64, 128, stride=2),
                                ConvLayer(128, 256, stride=2),
                                torch.nn.AdaptiveAvgPool2d(1), Flatten(),
                                torch.nn.Linear(256, 50), torch.nn.ReLU(),
                                torch.nn.Linear(50, dls.c), torch.nn.Tanh())
    #print(model)
    callbacks = ActivationStats(with_hist=True)
    learn = Learner(dls,
                    model,
                    loss_func=MSELossFlat(),
                    metrics=[rmse],
                    cbs=callbacks)
    #valley = learn.lr_find()
    return learn
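
A hedged usage sketch (the commented-out lr_find hints at the intended workflow; the epoch count and learning rate are illustrative):

learn = get_learner(dls)
learn.fit_one_cycle(5, 1e-3)  # values illustrative, not from the source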
Example #12

    def __init__(self):
        super(Autoencoder, self).__init__()
        self.print_shape = True
        self.decode = True

        self.encoder = nn.Sequential(
            conv_layer(3, 8),  # 8, 32, 32
            nn.AvgPool2d(2, ceil_mode=True),  # 8, 16, 16
            conv_layer(8, 8),  # 8, 16, 16
            nn.AvgPool2d(2, ceil_mode=True),  # 8, 8, 8 -> 512
            Flatten(),
            nn.Linear(8 * 8 * 8, 4))
        self.decoder = nn.Sequential(
            nn.Linear(4, 8 * 8 * 8),
            ResizeBatch(8, 8, 8),
            PixelShuffle_ICNR(8, 8),  # 8*16*16
            nn.ReLU(True),
            conv_layer(8, 8),
            PixelShuffle_ICNR(8, 8),  # 8, 32, 32
            conv_layer(8, 3))
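
The print_shape and decode flags suggest a forward along these lines (assumed; the listing ends before it):

    def forward(self, x):
        z = self.encoder(x)
        if self.print_shape:
            print(z.shape)
        return self.decoder(z) if self.decode else z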
Example #13
 def __init__(self, block, layers, num_classes, fully_conv=False, dropout=0):
     super().__init__()
     self.inplanes = 64 # ? 
     self.dropout = dropout
     print("Model head is fully conv?", fully_conv)
     
     features = [
         Lambda(lambda x: x.view(x.shape[0], 1, x.shape[1], x.shape[2])),
         conv(1, 64, ks=3, stride=2),
         bn(64),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
         self.make_layer(block, 64, layers[0], dropout=dropout),
         self.make_layer(block, 128, layers[1], stride=2, dropout=dropout),
         self.make_layer(block, 256, layers[2], stride=2, dropout=dropout),
         self.make_layer(block, 512, layers[3], stride=2, dropout=dropout),
     ]
     out_sz = 512 * block.expansion
     
     if fully_conv:
         features += [
             nn.Conv2d(out_sz, num_classes, 3, padding=1),
             Lambda(lambda x: x.view(x.shape[0], num_classes, -1)),
             Lambda(lambda x: torch.mean(x, dim=2))
         ]
     else:
         features += [
             nn.AdaptiveAvgPool2d(1),
             Flatten(),
             #nn.Dropout(0.1),
             nn.Linear(out_sz, num_classes)
         ]
     
     self.features = nn.Sequential(*features)
     
     for m in self.modules():
         if isinstance(m, nn.Conv2d):
             n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
             m.weight.data.normal_(0, math.sqrt(2. / n))
Example #14
 def __init__(self, model: EfficientNet):
     super().__init__()
     _transform_Conv2dStaticSamePadding_to_Sequential(model)
     for block in model._blocks:
         _transform_Conv2dStaticSamePadding_to_Sequential(block)
     self._conv_stem = model._conv_stem
     self._bn0 = model._bn0
     self._swish0 = deepcopy(model._swish)
     blocks = []
     for idx, block in enumerate(model._blocks):
         drop_connect_rate = model._global_params.drop_connect_rate
         if drop_connect_rate:
             drop_connect_rate *= float(idx) / len(model._blocks)
         blocks.append(MBConvBlockWrapper(block, drop_connect_rate))
     self._blocks = nn.Sequential(*blocks)
     self._conv_head = model._conv_head
     self._bn1 = model._bn1
     self._swish1 = deepcopy(model._swish)
     self._avg_pooling = model._avg_pooling
     self._flatten = Flatten()
     self._dropout = model._dropout
     self._fc = model._fc
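
A hedged forward chaining the stored stages in declaration order (this mirrors EfficientNet's own stem -> blocks -> head -> pool -> classifier sequence):

 def forward(self, x):
     x = self._swish0(self._bn0(self._conv_stem(x)))
     x = self._blocks(x)
     x = self._swish1(self._bn1(self._conv_head(x)))
     x = self._flatten(self._avg_pooling(x))
     return self._fc(self._dropout(x))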
Example #15
 def __init__(self, nc):
     super(MyEfficientNet, self).__init__()
     self.net = EfficientNet.from_pretrained('efficientnet-b0')
     infeature = self.net._conv_head.out_channels
     self.head = nn.Sequential(
         AdaptiveConcatPool2d(), Flatten(),
         nn.Linear(infeature * 2, 512), Mish(),
         nn.BatchNorm1d(512), nn.Dropout(0.5),
         nn.Linear(512, nc), MemoryEfficientSwish())
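
Since the head pools a 4-D feature map, forward presumably (not shown) routes through EfficientNet's extract_features, which returns the (bs, infeature, h, w) map the head expects:

 def forward(self, x):
     return self.head(self.net.extract_features(x))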