Example 1
 def __init__(self, discrete=True, memsize=50, **kw):
     super(XLTM, self).__init__(**kw)
     self.discrete = discrete
     self.memsize = memsize
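     # attention generator built around a dot-product distance; the consumer
     # reads the attended content back as a weighted sum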
     self.attgen = AttGen(DotDistance())
     self.attcon = WeightedSumAttCon() if self.discrete else WeightedSumAttCon()
Example 2
    def __init__(self,
                 wordembdim=50,
                 entembdim=200,
                 innerdim=200,
                 attdim=100,
                 outdim=1e4,
                 numwords=4e5,
                 **kw):
        super(FBSeqSimpEncDecAtt, self).__init__(**kw)
        self.indim = wordembdim
        self.outdim = outdim
        self.wordembdim = wordembdim
        self.encinnerdim = innerdim
        self.decinnerdim = innerdim
        self.entembdim = entembdim

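        # encoder: word embeddings fed into a GRU over the input sequence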
        self.wordencoder = WordEmbed(indim=numwords,
                                     outdim=self.wordembdim,
                                     trainfrac=1.0)
        self.rnn = RecStack(
            self.wordencoder,
            GRU(dim=self.wordembdim, innerdim=self.encinnerdim))

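        # attention: a linear gate over the concatenated encoder and decoder states,
        # consumed as a weighted sum over the encoder outputs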
        attgen = LinearGateAttentionGenerator(indim=self.encinnerdim +
                                              self.decinnerdim,
                                              attdim=attdim)
        attcon = WeightedSumAttCon()
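        # decoder: entity embeddings into a GRU; the attention context is
        # concatenated to the decoder output (outconcat=True) rather than its input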
        self.dec = SeqDecoder([
            VectorEmbed(indim=self.outdim, dim=self.entembdim),
            GRU(dim=self.entembdim, innerdim=self.decinnerdim)
        ],
                              attention=Attention(attgen, attcon),
                              outconcat=True,
                              inconcat=False,
                              innerdim=self.encinnerdim + self.decinnerdim)
Example 3
 def setUp(self):
     vocsize = 10
     innerdim = 50
     encdim = 30
     seqlen = 5
     batsize = 77
     self.att = Attention(
         LinearSumAttentionGenerator(indim=innerdim + encdim),
         WeightedSumAttCon())
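     # decoder with attention over the encoded sequence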
     self.decwatt = SeqDecoder([
         IdxToOneHot(vocsize),
         GRU(dim=vocsize + encdim, innerdim=innerdim)
     ],
                               inconcat=True,
                               attention=self.att,
                               innerdim=innerdim)
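     # baseline decoder without attention, otherwise identical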
     self.decwoatt = SeqDecoder([
         IdxToOneHot(vocsize),
         GRU(dim=vocsize + encdim, innerdim=innerdim)
     ],
                                inconcat=True,
                                innerdim=innerdim)
     self.attdata = np.random.random(
         (batsize, seqlen, encdim)).astype("float32")
     self.data = np.random.random((batsize, encdim)).astype("float32")
     self.seqdata = np.random.randint(0, vocsize, (batsize, seqlen))
     self.predshape = (batsize, seqlen, vocsize)
Example 4
 def __init__(self, vocsize=25, outvocsize=25, encdim=200, innerdim=200, attdim=50, **kw):
     super(RewAttSumDecoder, self).__init__(**kw)
     self.rnn = SeqEncoder(IdxToOneHot(vocsize), GRU(dim=vocsize, innerdim=encdim)).all_outputs
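     # the encoder exposes all per-step outputs so the decoder can attend over them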
     attgen = LinearGateAttentionGenerator(indim=innerdim+encdim, innerdim=attdim)
     attcon = WeightedSumAttCon()
     self.dec = SeqDecoder([IdxToOneHot(outvocsize), GRU(dim=outvocsize+encdim, innerdim=innerdim)],
                           inconcat=True,
                           attention=Attention(attgen, attcon),
                           innerdim=innerdim)
Example 5
    def init(self):
        #MEMORY: encodes how entity is written + custom entity embeddings
        wencpg = WordEncoderPlusGlove(numchars=self.numchars,
                                      numwords=self.numwords,
                                      encdim=self.wordencdim,
                                      embdim=self.wordembdim,
                                      embtrainfrac=0.0,
                                      glovepath=self.glovepath)
        self.memenco = SeqEncoder(
            wencpg,
            GRU(dim=self.wordembdim + self.wordencdim,
                innerdim=self.encinnerdim))

        entemb = VectorEmbed(indim=self.outdim, dim=self.entembdim)
        self.mempayload = ConcatBlock(entemb, self.memenco)
        self.memblock = MemoryBlock(self.mempayload,
                                    self.memdata,
                                    indim=self.outdim,
                                    outdim=self.encinnerdim + self.entembdim)

        #ENCODER: uses the same language encoder as memory
        #wencpg2 = WordEncoderPlusGlove(numchars=self.numchars, numwords=self.numwords, encdim=self.wordencdim, embdim=self.wordembdim, embtrainfrac=0.0, glovepath=glovepath)
        self.enc = RecStack(
            wencpg,
            GRU(dim=self.wordembdim + self.wordencdim,
                innerdim=self.encinnerdim))

        #ATTENTION
        attgen = LinearGateAttentionGenerator(indim=self.encinnerdim +
                                              self.decinnerdim,
                                              innerdim=self.attdim)
        attcon = WeightedSumAttCon()

        #DECODER
        #entemb2 = VectorEmbed(indim=self.outdim, dim=self.entembdim)
        self.softmaxoutblock = stack(
            self.memaddr(self.memblock,
                         indim=self.decinnerdim + self.encinnerdim,
                         memdim=self.memblock.outdim,
                         attdim=self.attdim), Softmax())

        self.dec = SeqDecoder([
            self.memblock,
            GRU(dim=self.entembdim + self.encinnerdim,
                innerdim=self.decinnerdim)
        ],
                              outconcat=True,
                              inconcat=False,
                              attention=Attention(attgen, attcon),
                              innerdim=self.decinnerdim + self.encinnerdim,
                              softmaxoutblock=self.softmaxoutblock)
Example 6
 def setUp(self):
     criteriondim = 20
     datadim = 20
     innerdim = 30
     batsize = 33
     seqlen = 11
     self.attgenshape = (batsize, seqlen)
     self.attconshape = (batsize, datadim)
     self.attgen = self.getattgenc(critdim=criteriondim, datadim=datadim, attdim=innerdim)
     self.attgenparams = self.getattgenparams()
     self.attcon = WeightedSumAttCon()
     self.att = Attention(self.attgen, self.attcon)
     self.criterion_val = np.random.random((batsize, criteriondim)).astype("float32")
     self.data_val = np.random.random((batsize, seqlen, datadim)).astype("float32")
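The shapes in this setup make the attention contract explicit: the generator maps a criterion of shape (batsize, criteriondim) and data of shape (batsize, seqlen, datadim) to one weight per time step, (batsize, seqlen), and the weighted-sum consumer collapses the data to (batsize, datadim). Below is a minimal NumPy sketch of that contract; the plain dot-product score and the softmax normalisation are assumptions for illustration, not the library's actual generator.

    import numpy as np

    def soft_attention(criterion, data):
        # criterion: (batsize, critdim), data: (batsize, seqlen, datadim)
        # generator: one unnormalised score per step (critdim == datadim assumed here)
        scores = np.einsum("bd,bsd->bs", criterion, data)             # (batsize, seqlen)
        weights = np.exp(scores - scores.max(axis=1, keepdims=True))
        weights /= weights.sum(axis=1, keepdims=True)                 # softmax over seqlen
        # weighted-sum consumer: collapse the sequence axis with the weights
        summary = np.einsum("bs,bsd->bd", weights, data)              # (batsize, datadim)
        return weights, summary

    weights, summary = soft_attention(
        np.random.random((33, 20)).astype("float32"),
        np.random.random((33, 11, 20)).astype("float32"))
    assert weights.shape == (33, 11) and summary.shape == (33, 20)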
Example 7
    def __init__(self,
                 inpvocsize=400,
                 inpembdim=None,
                 outvocsize=100,
                 outembdim=None,
                 encdim=100,
                 decdim=100,
                 attdim=100,
                 bidir=False,
                 rnu=GRU,
                 statetrans=None,
                 vecout=False,
                 inconcat=True,
                 outconcat=False,
                 **kw):
        encinnerdim = [encdim] if not issequence(encdim) else encdim
        decinnerdim = [decdim] if not issequence(decdim) else decdim

        self.enclayers, lastencinnerdim = \
            self.getenclayers(inpembdim, inpvocsize, encinnerdim, bidir, rnu)

        self.declayers = \
            self.getdeclayers(outembdim, outvocsize, lastencinnerdim,
                              decinnerdim, rnu, inconcat)

        # attention
        lastdecinnerdim = decinnerdim[-1]
        argdecinnerdim = lastdecinnerdim if outconcat is False else lastencinnerdim + lastdecinnerdim
        attgen = LinearGateAttentionGenerator(indim=lastencinnerdim +
                                              lastdecinnerdim,
                                              attdim=attdim)
        attcon = WeightedSumAttCon()

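        # optional state transfer: map the final encoder state to the decoder
        # state size with a linear transform (MatDot) when requested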
        if statetrans is True:
            if lastencinnerdim != lastdecinnerdim:  # state shape mismatch
                statetrans = MatDot(lastencinnerdim, lastdecinnerdim)
        elif statetrans == "matdot":
            statetrans = MatDot(lastencinnerdim, lastdecinnerdim)

        super(SimpleSeqEncDecAtt, self).__init__(self.enclayers,
                                                 self.declayers,
                                                 attgen,
                                                 attcon,
                                                 argdecinnerdim,
                                                 statetrans=statetrans,
                                                 vecout=vecout,
                                                 inconcat=inconcat,
                                                 outconcat=outconcat,
                                                 **kw)
Example 8
    def __init__(self,
                 wordembdim=50,
                 wordencdim=50,
                 entembdim=200,
                 innerdim=200,
                 attdim=100,
                 outdim=1e4,
                 numwords=4e5,
                 numchars=128,
                 glovepath=None,
                 **kw):
        super(FBSeqCompEncDecAtt, self).__init__(**kw)
        self.indim = wordembdim + wordencdim
        self.outdim = outdim
        self.wordembdim = wordembdim
        self.wordencdim = wordencdim
        self.encinnerdim = innerdim
        self.entembdim = entembdim
        self.decinnerdim = innerdim

        self.wordencoder = WordEncoderPlusGlove(numchars=numchars,
                                                numwords=numwords,
                                                encdim=self.wordencdim,
                                                embdim=self.wordembdim,
                                                embtrainfrac=0.0,
                                                glovepath=glovepath)

        self.rnn = RecStack(
            self.wordencoder,
            GRU(dim=wordembdim + wordencdim, innerdim=self.encinnerdim))
        attgen = LinearGateAttentionGenerator(indim=self.encinnerdim +
                                              self.decinnerdim,
                                              innerdim=attdim)
        attcon = WeightedSumAttCon()
        self.dec = SeqDecoder([
            VectorEmbed(indim=self.outdim, dim=self.entembdim),
            GRU(dim=self.entembdim, innerdim=self.decinnerdim)
        ],
                              attention=Attention(attgen, attcon),
                              outconcat=True,
                              inconcat=False,
                              innerdim=self.encinnerdim + self.decinnerdim)
Example 9
 def __init__(self, discrete=True, memsize=50, **kw):
     self._waitforit = True
     super(XLTM, self).__init__(**kw)
     self.paramnames = [
         "usf",
         "wsf",
         "msf",
         "bsf",  # state filter gate
         "umf",
         "wmf",
         "mmf",
         "bmf",  # memory filter gate
         "u",
         "w",
         "m",
         "b",  # candidate state generation
         "uug",
         "wug",
         "mug",
         "bug",  # update gate
         "uwf",
         "wwf",
         "mwf",
         "bwf",  # memory writing filter
         "uma",
         "wma",
         "mma",
         "bma",  # memory addressing gate
         "uma2",
         "wma2",
         "mma2",
         "bma2",  # memory addressing gate 2
         ("uif", (self.innerdim, self.indim)),
         ("wif", (self.indim, self.indim)),
         ("mif", (self.innerdim, self.indim)),
         ("bif", (self.indim, )),  # input filter gate
     ]
     self.discrete = discrete
     self.memsize = memsize
     self.attgen = DotprodAttGen(self.innerdim, -1, self.innerdim)
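     # discrete addressing reads back a single (argmax) memory slot; otherwise a soft weighted sum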
     self.attcon = ArgmaxAttCon() if self.discrete else WeightedSumAttCon()
     self.initparams()
Example 10
 def _getattention(self, attdist, sepatt=False):
     attgen = AttGen(attdist)
     attcon = WeightedSumAttCon()
     attention = Attention(attgen, attcon, separate=sepatt)
     return attention
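Given Example 1, where AttGen is wrapped around a DotDistance, a call to this helper would presumably look like the following; this is a hedged sketch of usage, not code taken from the source.

    # hypothetical usage inside the class that defines _getattention
    attention = self._getattention(DotDistance())
    attention_sep = self._getattention(DotDistance(), sepatt=True)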