Example #1
    # Mixed WaveNet + fastai tabular head; relies on fastai helpers
    # (ifnone, listify, embedding, bn_drop_lin, Flatten) and an apparently
    # project-specific conv_layer taking window/ks/dilation keywords.
    def __init__(self,
                 emb_szs,
                 n_cont,
                 out_sz,
                 layers,
                 emb_drop=0.,
                 window=24,
                 filters=[1, 2, 3, 4, 5, 6],
                 y_range=None,
                 use_bn=False,
                 ps=None,
                 bn_final=False):
        super().__init__()

        # TODO: Use the filters arg to generate the conv_layers dynamically
        # Wavenet model layers
        self.c1a = conv_layer(window=window // 2, ks=1, dilation=1)
        self.c1b = conv_layer(window=window // 4, ks=1, dilation=2)
        self.c2a = conv_layer(window=window // 2, ks=2, dilation=1)
        self.c2b = conv_layer(window=window // 4, ks=2, dilation=2)
        self.c3a = conv_layer(window=window // 2, ks=3, dilation=1)
        self.c3b = conv_layer(window=window // 4, ks=3, dilation=2)
        self.c4a = conv_layer(window=window // 2, ks=4, dilation=1)
        self.c4b = conv_layer(window=window // 4, ks=4, dilation=2)
        self.c5a = conv_layer(window=window // 2, ks=5, dilation=1)
        self.c5b = conv_layer(window=window // 4, ks=5, dilation=2)
        self.c6a = conv_layer(window=window // 2, ks=6, dilation=1)
        self.c6b = conv_layer(window=window // 4, ks=6, dilation=2)

        # Each of the len(filters) kernel sizes contributes a dilation-1 branch
        # of width window // 2 and a dilation-2 branch of width window // 4.
        num_wave_outputs = len(filters) * (window // 2) + len(filters) * (window // 4)

        # Fastai's Mixed Input model
        ps = ifnone(ps, [0] * len(layers))
        ps = listify(ps, layers)
        self.embeds = nn.ModuleList([embedding(ni, nf) for ni, nf in emb_szs])
        self.emb_drop = nn.Dropout(emb_drop)
        self.bn_cont = nn.BatchNorm1d(n_cont)
        n_emb = sum(e.embedding_dim for e in self.embeds)
        self.n_emb, self.n_cont, self.y_range = n_emb, n_cont, y_range
        sizes = self.get_sizes(layers, out_sz)
        actns = [nn.ReLU(inplace=True)] * (len(sizes) - 2) + [None]
        layers = []
        for i, (n_in, n_out, dp, act) in enumerate(
                zip(sizes[:-2], sizes[1:-1], [0.] + ps, actns)):
            layers += bn_drop_lin(n_in,
                                  n_out,
                                  bn=use_bn and i != 0,
                                  p=dp,
                                  actn=act)
        # The Sequential ends at sizes[-2] features (the final projection is
        # self.lin below), so the optional BatchNorm must match that width.
        if bn_final: layers.append(nn.BatchNorm1d(sizes[-2]))
        self.layers = nn.Sequential(*layers)

        # Final layer
        self.f = Flatten()
        self.lin = nn.Linear(sizes[-2] + num_wave_outputs, out_sz, bias=False)

        self.sizes = sizes
        self.num_wave_outputs = num_wave_outputs
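
The TODO at the top of Example #1 asks for the conv layers to be generated dynamically from the filters arg instead of the hard-coded c1a..c6b attributes. A minimal sketch of one way to do that, assuming conv_layer keeps the window/ks/dilation keyword signature used above (the helper name build_wave_layers is hypothetical, not part of the scraped source):

    def build_wave_layers(self, window, filters):
        # Hypothetical replacement for the hard-coded c1a..c6b attributes:
        # one dilation-1 branch and one dilation-2 branch per filter size.
        self.convs_a = nn.ModuleList(
            [conv_layer(window=window // 2, ks=k, dilation=1) for k in filters])
        self.convs_b = nn.ModuleList(
            [conv_layer(window=window // 4, ks=k, dilation=2) for k in filters])
        # num_wave_outputs stays len(filters) * (window // 2 + window // 4).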
Example #2
 def __init__(self, fpn, classifier, n_obj):
     super().__init__()
     self.fpn = fpn
     self.classifier = classifier
     # mod_dims is defined elsewhere in the source module.
     self.embeddings = nn.ModuleList([embedding(1, d) for d in mod_dims])
     self.output_conv = nn.Sequential(nnlayers.conv_layer(n_obj, 128),
                                      nnlayers.conv_layer(128, 128))
     self.register_buffer('inst', torch.zeros(1, dtype=torch.long))
     self.n_obj = n_obj
Example #3
 def __init__(self, head, fpn, classifier, n_obj, n_iter=1):
     super().__init__()
     self.head = head
     self.fpn = fpn
     self.classifier = classifier
     e_dims = [n_obj] + mod_dims  # mod_dims comes from module scope
     self.embeddings = nn.ModuleList([embedding(n_iter, d) for d in e_dims])
     self.register_buffer('inst', torch.arange(n_iter))
     self.n_obj = n_obj
     self.output_conv = nn.Conv2d(n_obj, 64, kernel_size=1, bias=False)
     self.down_conv = nn.Conv2d(n_obj, 256, kernel_size=1, bias=False)
     self._init()
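
Examples #2 and #3 register an index buffer named inst and size each embedding table against it, so the buffer can be fed straight to the tables at run time. A minimal sketch of that lookup, assuming this is how the (not shown) forward uses it:

     # Hypothetical: one embedding vector per index in the registered buffer.
     inst_embs = [emb(self.inst) for emb in self.embeddings]  # each (n_iter, d)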
Example #4
 def __init__(self, instructor, fpn, classifier):
     super().__init__()
     self.fpn = fpn
     self.classifier = classifier
     # mod_dims is defined elsewhere in the source module.
     self.embeddings = nn.ModuleList(
         [embedding(instructor.tree.n_obj, d) for d in mod_dims])
     self.instructor = instructor
Example #5
 def __init__(self, keys, embedding_dim):
     super().__init__()
     self.embedding = embedding(len(keys), embedding_dim)
     self.keys = list(keys)
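
Example #5 pairs an embedding table with a stored key list, which suggests lookups happen by key rather than by raw index. A minimal sketch of such a forward, assuming that intent (the method is not part of the scraped example):

 def forward(self, batch_keys):
     # Hypothetical: translate keys to their row indices, then embed them.
     idx = torch.tensor([self.keys.index(k) for k in batch_keys],
                        device=self.embedding.weight.device)
     return self.embedding(idx)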