Code Example #1
File: atisseqtrans.py  Project: lukovnikov/teafacto
 def apply(self, x):     # x: (batsize, seqlen)
     emb = self.E[x]     # (batsize, seqlen, inpembdim)
     outs = T.tensordot(emb, self.W, 1)  # (batsize, seqlen, outdim)
     outsf = outs.reshape((outs.shape[0] * outs.shape[1], outs.shape[2]))    # (batsize*seqlen, outdim)
     outsfsm = Softmax()(outsf)
     ret = outsfsm.reshape(outs.shape)   # (batsize, seqlen, outdim)
     return ret
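Note: the pattern above flattens the batch and time axes so a single softmax call normalizes every timestep at once, then restores the original shape. A minimal NumPy sketch of the same reshape-softmax-reshape trick (plain NumPy stand-ins, not the teafacto/Theano Softmax block; names and sizes are illustrative):

    import numpy as np

    def softmax(x, axis=-1):
        # numerically stable softmax along the given axis
        e = np.exp(x - x.max(axis=axis, keepdims=True))
        return e / e.sum(axis=axis, keepdims=True)

    batsize, seqlen, outdim = 2, 3, 4
    outs = np.random.randn(batsize, seqlen, outdim)     # (batsize, seqlen, outdim)
    outsf = outs.reshape(batsize * seqlen, outdim)      # (batsize*seqlen, outdim)
    probs = softmax(outsf).reshape(outs.shape)          # (batsize, seqlen, outdim)
    assert np.allclose(probs.sum(axis=-1), 1.0)         # each timestep sums to 1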
Code Example #3
File: memory.py  Project: zhongyunuestc/teafacto
 def __init__(self, memblock, memaddr, memattdim=100, indim=None, outnorm=Softmax(), **kw):
     super(MemoryStack, self).__init__(**kw)
     if not isinstance(memblock, MemoryBlock):
         raise Exception("must provide a loaded memory block")
     memdim = memblock.outdim
     indim = memdim if indim is None else indim
     self.exe = stack(memaddr(memblock, memdim=memdim, indim=indim, attdim=memattdim), outnorm)
Code Example #4
 def __init__(self, encdim, invocsize, outvocsize, innerdim, seqlen, **kw):
     super(idx2seqStupid, self).__init__(**kw)
     self.encdim = encdim
     self.invocsize = invocsize
     self.outvocsize = outvocsize
     self.innerdim = innerdim
     self.seqlen = seqlen
     self.emb = VectorEmbed(indim=self.invocsize,
                            dim=self.encdim,
                            normalize=True)
     self.aletter = stack(Lin(indim=self.encdim, dim=self.outvocsize),
                          Softmax())
     self.bletter = stack(Lin(indim=self.encdim, dim=self.outvocsize),
                          Softmax())
     self.cletter = stack(Lin(indim=self.encdim, dim=self.outvocsize),
                          Softmax())
Code Example #5
 def __init__(self,
              layers,
              softmaxoutblock=None,
              innerdim=None,
              attention=None,
              inconcat=True,
              outconcat=False,
              dropout=False,
              **kw):
     super(SeqDecoder, self).__init__(**kw)
     self.embedder = layers[0]
     self.block = RecStack(*layers[1:])
     self.outdim = innerdim
     self.attention = attention
     self.inconcat = inconcat
     self.outconcat = outconcat
     self._mask = False
     self._attention = None
     assert (isinstance(self.block, ReccableBlock))
     if softmaxoutblock is None:  # default softmax out block
         sm = Softmax()
         self.lin = Linear(indim=self.outdim,
                           dim=self.embedder.indim,
                           dropout=dropout)
         self.softmaxoutblock = asblock(lambda x: sm(self.lin(x)))
     elif softmaxoutblock is False:
         self.softmaxoutblock = asblock(lambda x: x)
     else:
         self.softmaxoutblock = softmaxoutblock
Code Example #6
File: test_stack.py  Project: Natty307/teafacto
 def setUp(self):
     dim = 50
     self.vocabsize = 2000
     data = np.arange(0, self.vocabsize).astype("int32")
     self.O = param((dim, self.vocabsize)).uniform()
     self.W = VectorEmbed(indim=self.vocabsize, dim=50)
     self.out = stack(self.W, asblock(lambda x: T.dot(self.O, x)),
                      Softmax())(Input(ndim=1, dtype="int32"))
Code Example #7
 def __init__(self, numin, *dims, **kw):
     super(Model, self).__init__(**kw)
     self.layers = []
     dims = list(dims)
     dims = [numin] + dims
     for i in range(1, len(dims)):
         self.layers.append(Linear(indim=dims[i - 1], dim=dims[i]))
         self.layers.append(Tanh())
     self.layers[-1] = Softmax()
Code Example #8
 def apply(self, x):  # x: (batsize, seqlen)
     emb = self.recembed(x) if self.scanemb else self.E[x]  # (batsize, seqlen, inpembdim)
     outs = self.recout(emb) if self.scanout else T.tensordot(emb, self.W, 1)  # (batsize, seqlen, outdim)
     if self.scansm:
         ret = self.recret(outs)
     else:
         outsf = outs.reshape((outs.shape[0] * outs.shape[1], outs.shape[2]))  # (batsize*seqlen, outdim)
         ret = Softmax()(outsf).reshape(outs.shape)  # (batsize, seqlen, outdim)
     return ret
Code Example #9
 def __init__(self, outlayers, **kw):
     super(Vec2Idx, self).__init__(**kw)
     if isinstance(outlayers, MemoryStack):
         out = outlayers
     else:
         if not issequence(outlayers):
             outlayers = [outlayers]
         if type(outlayers[-1]) is not Softmax:
             outlayers.append(Softmax())
         out = stack(*outlayers)
     self.out = out
Code Example #10
File: attention.py  Project: nilesh-c/teafacto
 def apply(self, criterion, data, mask=None):
     # criterion: (batsize, indim), data: (batsize, seqlen, memdim)
     o = self.getscores(criterion, data)  # (batsize, seqlen)
     o = Softmax()(o)
     if mask is not None:  # {0,1}^(batsize, seqlen)
         o = mask * o
     return o
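Note: multiplying the softmax output by a {0,1} mask, as above, leaves rows that no longer sum to 1; examples #14 and #18 below therefore renormalize after masking. A small NumPy sketch of that mask-then-renormalize step (stand-in softmax helper, illustrative names):

    import numpy as np

    def softmax(x, axis=-1):
        e = np.exp(x - x.max(axis=axis, keepdims=True))
        return e / e.sum(axis=axis, keepdims=True)

    scores = np.random.randn(2, 5)                       # (batsize, seqlen)
    mask = np.array([[1, 1, 1, 0, 0],
                     [1, 1, 1, 1, 0]], dtype="float64")  # {0,1}^(batsize, seqlen)
    w = softmax(scores) * mask                           # masked weights, rows sum to < 1
    w = w / w.sum(axis=1, keepdims=True)                 # renormalize: rows sum to 1 again
    assert np.allclose(w.sum(axis=1), 1.0)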
Code Example #11
 def __init__(self, embedder, *layers, **kw):
     """ layers must have an embedding layers first, final softmax layer is added automatically"""
     assert ("smodim" in kw and "outdim" in kw)
     self.embedder = embedder
     smodim = kw["smodim"]
     outdim = kw["outdim"]
     del kw["smodim"]
     del kw["outdim"]
     super(SeqTransducer, self).__init__(**kw)
     self.block = RecStack(*(layers +
                             (Lin(indim=smodim, dim=outdim), Softmax())))
Code Example #12
 def __init__(self, *layers, **kw):
     """ first two layers must be embedding layers. Final softmax is added automatically"""
     assert ("smodim" in kw and "outdim" in kw)
     smodim = kw["smodim"]
     outdim = kw["outdim"]
     del kw["smodim"]
     del kw["outdim"]
     super(SeqTransDec, self).__init__(**kw)
     self.inpemb = layers[0]
     self.outemb = layers[1]
     self.block = RecStack(*(layers[2:] +
                             (Lin(indim=smodim, dim=outdim), Softmax())))
Code Example #13
    def apply(self, x):
        E = self.E
        W = self.W
        sm = Softmax()

        def rec(x_t):
            emb = E[x_t]
            outs = T.dot(emb, W)
            return sm(outs)

        o, _ = T.scan(fn=rec, sequences=x.dimshuffle(1, 0), outputs_info=None)
        return o.dimshuffle(1, 0, 2)
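Note: this scan-based transducer computes the same per-timestep distributions as the batched reshape version in example #1, one timestep at a time (the dimshuffles move the time axis to the front for scan and back afterwards). A NumPy sketch of that equivalence (stand-in softmax helper, illustrative sizes):

    import numpy as np

    def softmax(x, axis=-1):
        e = np.exp(x - x.max(axis=axis, keepdims=True))
        return e / e.sum(axis=axis, keepdims=True)

    batsize, seqlen, vocsize, embdim, outdim = 2, 3, 10, 4, 5
    E = np.random.randn(vocsize, embdim)                 # embedding table (cf. self.E)
    W = np.random.randn(embdim, outdim)                  # output weights (cf. self.W)
    x = np.random.randint(0, vocsize, size=(batsize, seqlen))

    # per-timestep loop (time-major, as scan iterates), stacked back to batch-major
    per_step = np.stack([softmax(E[x[:, t]] @ W) for t in range(seqlen)], axis=1)
    # batched version, as in example #1
    flat = (E[x] @ W).reshape(batsize * seqlen, outdim)
    batched = softmax(flat).reshape(batsize, seqlen, outdim)
    assert np.allclose(per_step, batched)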
Code Example #14
File: enc.py  Project: zhongyunuestc/teafacto
 def apply(self, x, mask=None, weights=None):
     ret = self.enc(x, mask=mask,
                    weights=weights)  # (batsize, seqlen, lastdim)
     outs = []
     # apply mask (SeqEncoder should attach mask to outvar if all_outputs())
     mask = ret.mask
     for i in range(self.numouts):
         selfweights = Softmax()(ret[:, :, i])  # (batsize, seqlen)
         selfweights *= mask  # apply mask
         selfweights = selfweights / T.sum(selfweights, axis=1).dimshuffle(0, "x")  # renormalize
         weightedstates = ret[:, :, self.numouts:] * selfweights.dimshuffle(0, 1, "x")
         out = T.sum(weightedstates, axis=1)  # (batsize, lastdim)
         outs.append(out)
     if self.mode == "concat":
         ret = T.concatenate(outs, axis=1)
     elif self.mode == "seq":
         outs = [out.dimshuffle(0, "x", 1) for out in outs]
         ret = T.concatenate(outs, axis=1)
     return ret
Code Example #15
File: fb5.py  Project: nilesh-c/teafacto
 def apply(self, x):  # x: (batsize, 4)
     o, _ = T.scan(fn=self.rec,
                   sequences=[x[:, 0], x[:, 1], x[:, 2]],
                   non_sequences=[self.xes, self.yes, self.divmul],
                   outputs_info=None)  # (batsize, outdim)
     #axes = T.tile(x[:, 0], (self.xes.shape[0], 1)).T
     #ayes = T.tile(x[:, 1], (self.xes.shape[0], 1)).T
     #adivs = T.tile(x[:, 2], (self.xes.shape[0], 1)).T
     #bxes = T.tile(self.xes, (x.shape[0], 1))
     #byes = T.tile(self.yes, (x.shape[0], 1))
     #o = self.rec(axes, ayes, adivs, bxes, byes, self.divmul)
     ret = Softmax()(o)
     return ret
Code Example #16
    def init(self):
        #MEMORY: encodes how entity is written + custom entity embeddings
        wencpg = WordEncoderPlusGlove(numchars=self.numchars,
                                      numwords=self.numwords,
                                      encdim=self.wordencdim,
                                      embdim=self.wordembdim,
                                      embtrainfrac=0.0,
                                      glovepath=self.glovepath)
        self.memenco = SeqEncoder(
            wencpg,
            GRU(dim=self.wordembdim + self.wordencdim,
                innerdim=self.encinnerdim))

        entemb = VectorEmbed(indim=self.outdim, dim=self.entembdim)
        self.mempayload = ConcatBlock(entemb, self.memenco)
        self.memblock = MemoryBlock(self.mempayload,
                                    self.memdata,
                                    indim=self.outdim,
                                    outdim=self.encinnerdim + self.entembdim)

        #ENCODER: uses the same language encoder as memory
        #wencpg2 = WordEncoderPlusGlove(numchars=self.numchars, numwords=self.numwords, encdim=self.wordencdim, embdim=self.wordembdim, embtrainfrac=0.0, glovepath=glovepath)
        self.enc = RecStack(
            wencpg,
            GRU(dim=self.wordembdim + self.wordencdim,
                innerdim=self.encinnerdim))

        #ATTENTION
        attgen = LinearGateAttentionGenerator(
            indim=self.encinnerdim + self.decinnerdim, innerdim=self.attdim)
        attcon = WeightedSumAttCon()

        #DECODER
        #entemb2 = VectorEmbed(indim=self.outdim, dim=self.entembdim)
        self.softmaxoutblock = stack(
            self.memaddr(self.memblock,
                         indim=self.decinnerdim + self.encinnerdim,
                         memdim=self.memblock.outdim,
                         attdim=self.attdim), Softmax())

        self.dec = SeqDecoder(
            [self.memblock,
             GRU(dim=self.entembdim + self.encinnerdim, innerdim=self.decinnerdim)],
            outconcat=True,
            inconcat=False,
            attention=Attention(attgen, attcon),
            innerdim=self.decinnerdim + self.encinnerdim,
            softmaxoutblock=self.softmaxoutblock)
Code Example #17
    def init(self):
        #memory
        wencpg = WordEncoderPlusGlove(numchars=self.numchars,
                                      numwords=self.numwords,
                                      encdim=self.wordencdim,
                                      embdim=self.wordembdim,
                                      embtrainfrac=0.0,
                                      glovepath=self.glovepath)
        self.memenco = SeqEncoder(
            wencpg,
            GRU(dim=self.wordembdim + self.wordencdim,
                innerdim=self.encinnerdim))

        entemb = VectorEmbed(indim=self.outdim, dim=self.entembdim)
        self.mempayload = ConcatBlock(entemb, self.memenco)
        self.memblock = MemoryBlock(self.mempayload,
                                    self.memdata,
                                    indim=self.outdim,
                                    outdim=self.encinnerdim + self.entembdim)

        #encoder
        wencpg2 = WordEncoderPlusGlove(numchars=self.numchars,
                                       numwords=self.numwords,
                                       encdim=self.wordencdim,
                                       embdim=self.wordembdim,
                                       embtrainfrac=0.0,
                                       glovepath=self.glovepath)
        self.enc = SeqEncoder(
            wencpg2,
            GRU(dim=self.wordembdim + self.wordencdim,
                innerdim=self.encinnerdim))

        #decoder
        entemb2 = VectorEmbed(indim=self.outdim, dim=self.entembdim)
        self.softmaxoutblock = stack(
            self.memaddr(self.memblock,
                         indim=self.decinnerdim,
                         memdim=self.memblock.outdim,
                         attdim=self.attdim), Softmax())
        self.dec = SeqDecoder(
            [
                entemb2,  #self.memblock,
                GRU(
                    dim=entemb.dim + self.encinnerdim,
                    innerdim=self.decinnerdim
                ),  # GRU(dim=self.memblock.outdim + self.encinnerdim, innerdim=self.decinnerdim),
            ],
            inconcat=True,
            innerdim=self.decinnerdim,
            softmaxoutblock=self.softmaxoutblock)
Code Example #18
File: enc.py  Project: Natty307/teafacto
 def apply(self, x, mask=None, weights=None):
     ret = self.enc(x, mask=mask,
                    weights=weights)  # (batsize, seqlen, lastdim)
     outs = []
     # apply mask (SeqEncoder should attach mask to outvar if all_outputs())
     mask = mask if mask is not None else (ret.mask if hasattr(ret, "mask") else None)
     if self.bidir:
         mid = ret.shape[2] // 2  # split the output's last dim into its two halves
         ret1 = ret[:, :, :mid]
         ret2 = ret[:, :, mid:]
         ret = ret1
     for i in range(self.numouts):
         selfweights = ret[:, :, i]  # (batsize, seqlen)
         if self.bidir:
             selfweights += ret2[:, :, i]
         selfweights = Softmax()(selfweights)
         if mask is not None:
             selfweights *= mask  # apply mask
         selfweights = selfweights / T.sum(selfweights, axis=1).dimshuffle(0, "x")  # renormalize
         weightedstates = ret[:, :, self.numouts:] * selfweights.dimshuffle(0, 1, "x")
         if self.bidir:
             weightedstates2 = ret2[:, :, self.numouts:] * selfweights.dimshuffle(0, 1, "x")
             weightedstates = T.concatenate([weightedstates, weightedstates2], axis=2)
         out = T.sum(weightedstates, axis=1)  # (batsize, lastdim)
         outs.append(out)
     if self.mode == "concat":
         ret = T.concatenate(outs, axis=1)
     elif self.mode == "seq":
         outs = [out.dimshuffle(0, "x", 1) for out in outs]
         ret = T.concatenate(outs, axis=1)
     return ret
Code Example #19
 def __init__(self,
              indim=400,
              embdim=50,
              inpemb=None,
              innerdim=100,
              outdim=50,
              rnu=GRU,
              **kw):
     if inpemb is None:
         emb = VectorEmbed(indim=indim, dim=embdim)
     else:
         emb = inpemb
         embdim = emb.outdim
     if not issequence(innerdim):
         innerdim = [innerdim]
     innerdim = [embdim] + innerdim
     rnn, _ = MakeRNU.fromdims(innerdim, rnu=rnu)
     smo = Lin(indim=innerdim[-1], dim=outdim)
     super(SimpleSeqTrans, self).__init__(emb, *(rnn + [smo, Softmax()]),
                                          **kw)
Code Example #20
 def apply(self, inp):
     enco = self.enc(inp)
     ret = Softmax()(self.out(enco))
     return ret
Code Example #21
File: test_vae.py  Project: qdbszsj/teafacto-1
 def apply(self, x):     # (batsize,)
     m = self.W_m[x]
     s = self.W_s[x]
     z = RVal(seed=self.seed).normal(m.shape) * s + m
     o = T.dot(z, self.O)
     return Softmax()(o)
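Note: this test draws a latent sample with the usual reparameterization (mean plus noise scaled by a per-index deviation) before projecting and applying the softmax. A hedged NumPy sketch of the same steps (illustrative names; the comments point to the analogous lookups in the snippet):

    import numpy as np

    def softmax(x, axis=-1):
        e = np.exp(x - x.max(axis=axis, keepdims=True))
        return e / e.sum(axis=axis, keepdims=True)

    rng = np.random.default_rng(0)
    batsize, latdim, outdim = 2, 3, 5
    m = rng.standard_normal((batsize, latdim))    # per-example means (cf. self.W_m[x])
    s = rng.random((batsize, latdim))             # per-example scales (cf. self.W_s[x])
    O = rng.standard_normal((latdim, outdim))     # output projection (cf. self.O)
    z = rng.standard_normal(m.shape) * s + m      # reparameterized sample
    probs = softmax(z @ O)                        # (batsize, outdim), rows sum to 1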
Code Example #22
 def apply(self, inpseq):  # (batsize, amwords, amchars+1)
     inpenc = self.phraseencoder(inpseq)  # (batsize, encdim)
     #scores = T.dot(inpenc, self.W)          # (batsize, memdim)
     #scores = T.nnet.sigmoid(T.dot(scores, self.memblock.innervar.T))      # (batsize, memsize)
     #return Softmax()(scores)                    #
     return Softmax()(self.mema(inpenc))
Code Example #23
 def apply(self, inptensor):
     emb = self.W(inptensor)
     out = T.dot(emb, self.O)
     out.output_as("out")
     probs = Softmax()(out)
     return probs
Code Example #24
File: attention.py  Project: qdbszsj/teafacto-1
 def __init__(self, distance, normalizer=Softmax(), **kw):
     super(AttGen, self).__init__(**kw)
     self.dist = distance
     self.normalizer = normalizer
Code Example #25
File: autornnencdec.py  Project: nilesh-c/teafacto
 def apply(self, seqs):
     return Softmax()(self.outlin(self.enc(seqs)))