Example #1
 def __init__(self, memblock, memdim=None, indim=None, attdim=None, **kw):
     assert (indim is not None and memdim is not None and attdim is not None)
     indim = memdim + indim
     innerdim = attdim
     super(LinearGateMemAddr, self).__init__(memblock, **kw)
     self.W = param((indim, innerdim), name="attention_ff").uniform()
     self.U = param((innerdim,), name="attention_agg").uniform()
Example #2
 def makeparams(self):
     if not self.noinput:
         self.w = param((self.indim, self.innerdim),
                        name="w").init(self.paraminit)
         self.wm = param((self.indim, self.innerdim),
                         name="wm").init(self.paraminit)
         self.whf = param((self.indim, self.innerdim),
                          name="whf").init(self.paraminit)
     else:
         self.w, self.wm, self.whf = 0, 0, 0
     self.u = param((self.innerdim, self.innerdim),
                    name="u").init(self.paraminit)
     self.um = param((self.innerdim, self.innerdim),
                     name="um").init(self.paraminit)
     self.uhf = param((self.innerdim, self.innerdim),
                      name="uhf").init(self.paraminit)
     if not self.nobias:
         self.b = param((self.innerdim, ), name="b").init(self.biasinit)
         if self._init_carry_bias > 0:
             amnt = (default_init_carry_gate_bias
                     if self._init_carry_bias is True else self._init_carry_bias)
             self.bm = param((self.innerdim, ), name="bm").constant(amnt)
         else:
             self.bm = param((self.innerdim, ),
                             name="bm").init(self.biasinit)
         self.bhf = param((self.innerdim, ), name="bhf").init(self.biasinit)
     else:
         self.b, self.bm, self.bhf = 0, 0, 0
Example #3
 def __init__(self, vocsize, embdim, seed=None, **kw):
     super(EmbeddingVAE, self).__init__(**kw)
     self.W_m = param((vocsize, embdim)).glorotuniform()
     self.W_s = param((vocsize, embdim)).glorotuniform()
     self.O = param((embdim, vocsize)).glorotuniform()
     self.embdim = embdim
     self.seed = seed
Example #4
 def __init__(self,
              indim,
              dim,
              w_init="glorotuniform",
              b_init="uniform",
              init_carry_bias=True,
              dropout=False,
              carry_activation=T.nnet.sigmoid,
              **kw):
     """ init_carry_bias sets carry gate bias to negative value to encourage carry behavior (see Highway Networks paper) """
     super(ForwardHighway, self).__init__(indim,
                                          dim,
                                          w_init=w_init,
                                          b_init=b_init,
                                          dropout=dropout,
                                          **kw)
     self.carry_activation = carry_activation
     self.W_t = None
     if indim != dim:
         self.W_t = param((indim, dim), name="W_t").init(self.w_init)
     self.W_c = param((self.indim, self.dim),
                      name="carry_W").init(self.w_init)
     if init_carry_bias > 0:
         amnt = default_carry_bias if init_carry_bias is True else init_carry_bias
         self.b_c = param((self.dim, ), name="carry_b").constant(-amnt)
     else:
         self.b_c = param((self.dim, ), name="carry_b").init(self.b_init)
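As the docstring notes, initializing the carry-gate bias to a negative value makes the gate output near zero at the start of training, so the layer initially passes its input through almost unchanged (see Srivastava et al., "Highway Networks"). Below is a minimal numpy sketch of the forward pass these parameters plug into; the function and the exact gating arrangement are illustrative assumptions, not this library's code:

    import numpy as np

    def highway_forward(x, W, b, W_c, b_c, W_t=None):
        # hypothetical sketch: h is the transform, t the gate built from
        # carry_W / carry_b above; negative b_c => t ~ 0 => output ~ input
        h = np.tanh(x.dot(W) + b)
        t = 1.0 / (1.0 + np.exp(-(x.dot(W_c) + b_c)))  # sigmoid carry_activation
        x_pass = x if W_t is None else x.dot(W_t)      # W_t projects when indim != dim
        return t * h + (1.0 - t) * x_pass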
Example #5
 def makeparams(self):
     if not self.noinput:
         self.w = param((self.indim, self.innerdim),
                        name="w").init(self.paraminit)
     else:
         self.w = 0
     self.u = param((self.innerdim, self.innerdim),
                    name="u").init(self.paraminit)
     if self.nobias is False:
         self.b = param((self.innerdim, ), name="b").init(self.biasinit)
     else:
         self.b = 0
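These three parameters (w, u, b) are exactly what a vanilla recurrent step needs: project the input with w, the previous state with u, add the bias, and squash. A rough numpy sketch of one step, assuming a tanh activation:

    import numpy as np

    def rnu_step(x_t, h_prev, w, u, b):
        # x_t: (indim,), h_prev: (innerdim,)
        # w: (indim, innerdim), u: (innerdim, innerdim), b: (innerdim,)
        return np.tanh(x_t.dot(w) + h_prev.dot(u) + b)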
Example #6
 def makeparams(self):
     super(IFGRU, self).makeparams()
     self.uif = param((self.innerdim, self.indim),
                      name="uif").init(self.paraminit)
     if not self.noinput:
         self.wif = param((self.indim, self.indim),
                          name="wif").init(self.paraminit)
     else:
         self.wif = 0
     if not self.nobias:
         self.bif = param((self.indim, ), name="bif").init(self.biasinit)
     else:
         self.bif = 0
Example #7
 def __init__(self, encdim, invocsize, outvocsize, innerdim, seqlen, **kw):
     super(idx2seqTheano, self).__init__(**kw)
     self.encdim = encdim
     self.invocsize = invocsize
     self.outvocsize = outvocsize
     self.innerdim = innerdim
     self.seqlen = seqlen
     self.wordemb = param((invocsize, encdim)).uniform()
     self.idxtovec = Val(np.eye(outvocsize, outvocsize))
     self.rnu_w = param((encdim + outvocsize, innerdim)).uniform()
     self.rnu_u = param((innerdim, innerdim)).uniform()
     self.outpf = theano.tensor.tanh
     self.olin = param((innerdim, outvocsize)).uniform()
     self.ownparams = [self.wordemb, self.rnu_u, self.rnu_w, self.olin]
Example #8
 def __init__(self,
              indim,
              dim,
              w_init="glorotuniform",
              b_init="uniform",
              dropout=False,
              **kw):
     super(Linear, self).__init__(**kw)
     self.indim = indim
     self.dim = dim
     self.w_init = w_init
     self.b_init = b_init
     self.W = param((self.indim, self.dim), name="linear_W").init(w_init)
     self.b = param((self.dim, ), name="linear_b").init(b_init)
     self.dropout = Dropout(dropout)
Example #9
 def __init__(self,
              indim=None,
              dim=None,
              value=None,
              normalize=False,
              trainfrac=1.0,
              init=None,
              maskid=None,
              **kw):
     super(VectorEmbed, self).__init__(indim,
                                       dim,
                                       normalize=normalize,
                                       trainfrac=trainfrac,
                                       **kw)
     self.maskid = maskid
     if value is None:
         self.W = param((indim, dim), lrmul=self.trainfrac, name="embedder")
         if init == "zero":
             self.W = self.W.constant(0.0)
         elif init in ["glorot", None]:
             self.W = self.W.glorotuniform()
         elif init == "uniform":
             self.W = self.W.uniform()
     elif value is False:
         self.W = None  # no initialization
     else:
         self.setvalue(value)
         self.indim, self.outdim = value.shape
     if self.normalize:
         self.W = self.W.normalize(axis=1)
     # assertions
     assert (self.W is None
             or self.W.d.get_value().shape == (self.indim, self.outdim))
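The (indim, dim) matrix created here acts as a lookup table: embedding a sequence of integer ids is plain row indexing, and maskid (if set) presumably marks which positions are padding. A small numpy illustration with made-up sizes:

    import numpy as np

    W = np.random.uniform(-0.1, 0.1, size=(1000, 50))  # (indim, dim)
    ids = np.array([3, 17, 0], dtype="int32")          # a token-id sequence
    vectors = W[ids]                                   # (3, 50) embedded sequence
    mask = ids != 0                                    # e.g. with maskid=0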
Example #10
 def __init__(self,
              indim=1000,
              dim=50,
              value=None,
              normalize=False,
              trainfrac=1.0,
              **kw):
     super(VectorEmbed, self).__init__(indim, dim, **kw)
     self.dim = dim
     self.indim = indim
     self.trainfrac = trainfrac
     if value is None:
         self.W = param((indim, dim), lrmul=self.trainfrac,
                        name="embedder").glorotuniform()
     else:
         if trainfrac == 0.0:
             self.W = Val(value, name="embedder_val")
         else:
             self.W = Parameter(value,
                                lrmul=self.trainfrac,
                                name="embedder")
     if normalize:
         self.W = self.W.normalize(axis=1)
     # assertions
     assert (self.W.d.get_value().shape == (self.indim, self.dim))
Example #11
 def setUp(self):
     dim = 50
     self.vocabsize = 2000
     data = np.arange(0, self.vocabsize).astype("int32")
     self.O = param((dim, self.vocabsize)).uniform()
     self.W = VectorEmbed(indim=self.vocabsize, dim=50)
     self.out = stack(self.W, asblock(lambda x: T.dot(self.O, x)),
                      Softmax())(Input(ndim=1, dtype="int32"))
Example #12
 def __init__(self, memblock, memdim=None, indim=None, attdim=None, **kw):
     # indim should be mem_dim, innerdim should be crit_dim
     assert indim is not None and memdim is not None  # can not specify separate attention dimensions
     super(GeneralDotMemAddr, self).__init__(memblock, **kw)
     self.W = param((memdim, indim), name="addressing").uniform()
Example #13
 def __init__(self, **kw):
     super(SimpleSeq2Sca, self).__init__(**kw)
     self.enc.all_outputs().with_mask()
     if "innerdim" in kw:
         kwindim = kw["innerdim"]
         if issequence(kwindim):
             summdim = kwindim[-1]
         else:
             summdim = kwindim
     else:
         summdim = 100
     self.summ = param((summdim, ), name="summarize").uniform()
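The (summdim,) vector named "summarize" has the right shape to reduce each encoder output to a scalar: dotting every time step's state with it yields one score per position, which is presumably how the sequence is collapsed. An illustrative numpy sketch:

    import numpy as np

    outs = np.random.randn(7, 100)   # (seqlen, summdim) encoder outputs
    summ = np.random.randn(100)      # the "summarize" parameter
    scores = outs.dot(summ)          # (7,) -- one scalar per time step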
Example #14
 def __init__(self,
              indim,
              dim,
              value=None,
              init="glorotuniform",
              dropout=False,
              **kw):
     super(MatDot, self).__init__(**kw)
     self.indim = indim
     self.dim = dim
     if value is None:
         self.W = param((self.indim, self.dim), name="matdot").init(init)
     else:
         self.W = value
     self.dropout = Dropout(dropout)
Example #15
 def __init__(self, outpactivation=T.tanh, param_init_states=False, **kw):
     self.outpactivation = outpactivation
     super(RNU, self).__init__(**kw)
     self.initstateparams = None
     if param_init_states:
         self.initstateparams = []
         for spec in self.get_statespec():
             if spec[0] == "state":
                 initstateparam = param(spec[1],
                                        name="init_state").uniform()
                 self.initstateparams.append(initstateparam)
             else:
                 self.initstateparams.append(None)
     self.param_init_states = param_init_states
     self.makeparams()
Example #16
 def __init__(self,
              indim=50,
              outdim=50,
              window=5,
              border_mode="half",
              stride=1,
              filter_flip=True,
              **kw):
     super(Conv1D, self).__init__(**kw)
     if isinstance(border_mode, tuple):
         (border_mode, ) = border_mode
     if isinstance(border_mode, int):
         border_mode = (border_mode, 0)
     self.border_mode = border_mode
     self.stride = stride
     self.filter_flip = filter_flip
     self.filter_shape = (outdim, indim, window, 1)
     self.filter = param(self.filter_shape, name="conv_w").glorotuniform()
     self.maskfilter_shape = (1, 1, window, 1)
     self.maskfilter = Val(np.ones(self.maskfilter_shape, dtype="float32"))
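The filter shape (outdim, indim, window, 1) maps the 1-D convolution onto a 2-D one: the sequence axis plays the role of image height and a dummy width-1 axis is appended, so an input laid out as (batch, indim, seqlen, 1) convolved with this filter produces (batch, outdim, seqlen', 1). The all-ones (1, 1, window, 1) maskfilter has the same window, so it can presumably be convolved over the sequence mask to track which output positions touched padding.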
Example #17
 def initparams(self):
     for n, _ in self.rnuparams.items():        # delete existing params
         if hasattr(self, n):
             delattr(self, n)
     self.rnuparams = {}
     for paramname in self.paramnames:
         shape = None
         if isinstance(paramname, tuple):
             shape = paramname[1]
             paramname = paramname[0]
         if paramname[0] == "b" and self.nobias is True:
             setattr(self, paramname, 0)
             continue
         if shape is None:
             if paramname[0] == "b" or paramname[0] == "p": # bias or peepholes, internal weights
                 shape = (self.innerdim,)
             elif paramname[0] == "w": #input processing matrices
                 shape = (self.indim, self.innerdim)
             else: # internal recurrent matrices
                 shape = (self.innerdim, self.innerdim)
         self.rnuparams[paramname] = param(shape, name=paramname).init(self.paraminit)
         setattr(self, paramname, self.rnuparams[paramname])
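The comments encode a naming convention for inferring shapes when none is given: a leading "b" or "p" means a bias/peephole vector, a leading "w" an input-projection matrix, and anything else a recurrent matrix. A hypothetical restatement of just that rule (not this class's code):

    # shape inference by first letter of the parameter name
    def shape_for(name, indim=50, innerdim=100):
        if name[0] in ("b", "p"):
            return (innerdim,)            # bias or peephole vector
        elif name[0] == "w":
            return (indim, innerdim)      # input-processing matrix
        return (innerdim, innerdim)       # recurrent matrix

    assert shape_for("w") == (50, 100)
    assert shape_for("uhf") == (100, 100)
    assert shape_for("bif") == (100,)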
Example #18
from teafacto.core.base import tensorops as T, Val, param
import numpy as np
import sys

x = Val(np.random.random((10, 10)))
# y = Val(np.random.random((10,10)))
y = param((10, 10), name="y").uniform()
w = param((10, 10), name="w").uniform()

# z = T.dot(x, y)
z = x + y
u = z * w
s = T.nnet.sigmoid
s2 = T.nnet.sigmoid
print s == s2
sys.exit()  # the script stops here; the checks below only run if this line is removed
print z.allparams
print T.dot
print z.ndim
print z.dimswap
zd = z.dimswap(1, 0)
print z.dimswap(0, 1).allparams
print y.dimswap(0, 1).allparams
print T.nnet.conv.conv2d
print u.norm(2).allparams
print u.dimswap(0, 1).allparams
print T.nnet.softmax(z).allparams
zs = T.nnet.sigmoid(z)
zs = zs + x
zs.autobuild()
zs.autobuild()
Example #19
 def __init__(self, indim, aggdim, **kw):
     super(LinearDistance, self).__init__(**kw)
     self.W = param((indim, aggdim), name="attention_ff").uniform()
     self.U = param((aggdim,), name="attention_agg").uniform()
Example #20
 def __init__(self, embdim, numents, numrels, **kw):
     self.A = VectorEmbed(indim=numents, dim=embdim)
     self.R = param((numrels, embdim, embdim),
                    name="rel_embed").glorotuniform()
     self.scorer = DotDistance()
     super(Rescal, self).__init__(**kw)
Example #21
    def __init__(self,
                 inpvocsize=None,
                 inpembdim=None,
                 inpemb=None,
                 inpencinnerdim=None,
                 bidir=False,
                 maskid=None,
                 dropout=False,
                 rnu=GRU,
                 inpencoder=None,
                 memvocsize=None,
                 memembdim=None,
                 memembmat=None,
                 memencinnerdim=None,
                 memencoder=None,
                 inp_att_dist=CosineDistance(),
                 mem_att_dist=CosineDistance(),
                 inp_attention=None,
                 mem_attention=None,
                 coredims=None,
                 corernu=GRU,
                 core=None,
                 explicit_interface=False,
                 scalaraggdim=None,
                 write_value_dim=None,
                 nsteps=100,
                 posvecdim=None,
                 mem_pos_repr=None,
                 inp_pos_repr=None,
                 inp_addr_extractor=None,
                 mem_addr_extractor=None,
                 write_addr_extractor=None,
                 write_addr_generator=None,
                 write_addr_dist=CosineDistance(),
                 write_value_generator=None,
                 write_value_extractor=None,
                 mem_erase_generator=None,
                 mem_change_generator=None,
                 memsampler=None,
                 memsamplemethod=None,
                 memsampletemp=0.3,
                 **kw):

        # INPUT ENCODING
        if inpencoder is None:
            inpencoder = SeqEncoder.RNN(indim=inpvocsize,
                                        inpembdim=inpembdim,
                                        inpemb=inpemb,
                                        innerdim=inpencinnerdim,
                                        bidir=bidir,
                                        maskid=maskid,
                                        dropout_in=dropout,
                                        dropout_h=dropout,
                                        rnu=rnu).all_outputs()
            lastinpdim = inpencinnerdim if not issequence(
                inpencinnerdim) else inpencinnerdim[-1]
        else:
            lastinpdim = inpencoder.block.layers[-1].innerdim

        # MEMORY ENCODING
        if memembmat is None:
            memembmat = param((memvocsize, memembdim),
                              name="memembmat").glorotuniform()
        if memencoder is None:
            memencoder = SeqEncoder.RNN(inpemb=False,
                                        innerdim=memencinnerdim,
                                        bidir=bidir,
                                        dropout_in=dropout,
                                        dropout_h=dropout,
                                        rnu=rnu,
                                        inpembdim=memembdim).all_outputs()
            lastmemdim = memencinnerdim if not issequence(
                memencinnerdim) else memencinnerdim[-1]
        else:
            lastmemdim = memencoder.block.layers[-1].innerdim

        # POSITION VECTORS
        if posvecdim is not None and inp_pos_repr is None:
            inp_pos_repr = RNNWithoutInput(posvecdim, dropout=dropout)
        if posvecdim is not None and mem_pos_repr is None:
            mem_pos_repr = RNNWithoutInput(posvecdim, dropout=dropout)

        xtra_dim = posvecdim if posvecdim is not None else 0
        # CORE RNN - THE THINKER
        if core is None:
            corelayers, _ = MakeRNU.fromdims(
                [lastinpdim + lastmemdim + xtra_dim * 2] + coredims,
                rnu=corernu,
                dropout_in=dropout,
                dropout_h=dropout,
                param_init_states=True)
            core = RecStack(*corelayers)

        lastcoredim = core.get_statespec()[-1][0][1][0]

        # ATTENTIONS
        if mem_attention is None:
            mem_attention = Attention(mem_att_dist)
        if inp_attention is None:
            inp_attention = Attention(inp_att_dist)
        if write_addr_generator is None:
            write_addr_generator = AttGen(write_addr_dist)

        # WRITE VALUE
        if write_value_generator is None:
            write_value_generator = WriteValGenerator(write_value_dim,
                                                      memvocsize,
                                                      dropout=dropout)

        # MEMORY SAMPLER
        if memsampler is not None:
            assert (memsamplemethod is None)
        if memsamplemethod is not None:
            assert (memsampler is None)
            memsampler = GumbelSoftmax(temperature=memsampletemp)

        ################ STATE INTERFACES #################

        if not explicit_interface:
            if inp_addr_extractor is None:
                inp_addr_extractor = Forward(lastcoredim,
                                             lastinpdim + xtra_dim,
                                             dropout=dropout)
            if mem_addr_extractor is None:
                mem_addr_extractor = Forward(lastcoredim,
                                             lastmemdim + xtra_dim,
                                             dropout=dropout)

            # WRITE INTERFACE
            if write_addr_extractor is None:
                write_addr_extractor = Forward(lastcoredim,
                                               lastmemdim + xtra_dim,
                                               dropout=dropout)
            if write_value_extractor is None:
                write_value_extractor = Forward(lastcoredim,
                                                write_value_dim,
                                                dropout=dropout)

            # MEM UPDATE INTERFACE
            if mem_erase_generator is None:
                mem_erase_generator = StateToScalar(lastcoredim, scalaraggdim)
            if mem_change_generator is None:
                mem_change_generator = StateToScalar(lastcoredim, scalaraggdim)
        else:
            inp_addr_extractor, mem_addr_extractor, write_addr_extractor, \
            write_value_extractor, mem_erase_generator, mem_change_generator = \
                make_vector_slicers(0, lastinpdim + xtra_dim, lastmemdim + xtra_dim,
                                    lastmemdim + xtra_dim, write_value_dim, 1, 1)

        super(SimpleBulkNN,
              self).__init__(inpencoder=inpencoder,
                             memembmat=memembmat,
                             memencoder=memencoder,
                             inp_attention=inp_attention,
                             mem_attention=mem_attention,
                             core=core,
                             memsampler=memsampler,
                             nsteps=nsteps,
                             inp_addr_extractor=inp_addr_extractor,
                             mem_addr_extractor=mem_addr_extractor,
                             write_addr_extractor=write_addr_extractor,
                             write_addr_generator=write_addr_generator,
                             mem_erase_generator=mem_erase_generator,
                             mem_change_generator=mem_change_generator,
                             write_value_generator=write_value_generator,
                             write_value_extractor=write_value_extractor,
                             inp_pos_repr=inp_pos_repr,
                             mem_pos_repr=mem_pos_repr,
                             **kw)
Example #22
 def __init__(self, indim=100, inpembdim=50, outdim=100, **kw):
     self.E = param((indim, inpembdim), name="emb").uniform()
     self.W = param((inpembdim, outdim), name="W").uniform()
     super(StupidAtis, self).__init__(**kw)
Example #23
 def __init__(self, dim=500, sharpness=1, **kw):
     super(SpatialEmb, self).__init__(**kw)
     self.xes = param((dim, ), name="xes").constant(5000.0)
     self.yes = param((dim, ), name="yes").constant(5000.0)
     self.divmul = param((), name="divmul").constant(1.0)
     self.e = sharpness
Example #24
 def __init__(self, indim=100, inpembdim=50, outdim=100, **kw):
     super(StupidAtisNative, self).__init__(**kw)
     self.E = self.add_param(param((indim, inpembdim), name="emb").uniform())
     self.W = self.add_param(param((inpembdim, outdim), name="W").uniform())
Example #25
 def test_params_propagated_through_rnu(self):
     O = param((self.dim, self.dim), name="bigo").uniform()
     i = Input(ndim=2, dtype="int32")
     x = O[i, :]
     out = self.rnu(x)
     self.assertIn(O, out.allparams)
Example #26
 def __init__(self, indim, dim, w_init="uniform", b_init="uniform", **kw):
     super(Linear, self).__init__(**kw)
     self.indim = indim
     self.dim = dim
     self.W = param((self.indim, self.dim)).init(w_init)
     self.b = param((self.dim, )).init(b_init)
Example #27
 def __init__(self, indim, dim, init="uniform", **kw):
     super(MatDot, self).__init__(**kw)
     self.indim = indim
     self.dim = dim
     self.W = param((self.indim, self.dim), name="matdot").init(init)
Example #28
 def __init__(self, **kw):
     super(LinearGateAttentionGenerator, self).__init__(**kw)
     self.W = param((self.indim, self.attdim), name="attention_ff").uniform()
     self.U = param((self.attdim,), name="attention_agg").uniform()
Example #29
 def __init__(self, memblock, memdim=None, indim=None, attdim=None, **kw):
     super(TransDotMemAddr, self).__init__(memblock, **kw)
     self.W = param((memdim, attdim), name="addr_memtrans").uniform()
     self.U = param((indim, attdim), name="addr_crittrans").uniform()
Example #30
 def makeparams(self):
     self.usf = param((self.innerdim, self.innerdim),
                      name="usf").init(self.paraminit)
     self.umf = param((self.innerdim, self.innerdim),
                      name="umf").init(self.paraminit)
     self.u = param((self.innerdim, self.innerdim),
                    name="u").init(self.paraminit)
     self.uug = param((self.innerdim, self.innerdim),
                      name="uug").init(self.paraminit)
     self.uwf = param((self.innerdim, self.innerdim),
                      name="uwf").init(self.paraminit)
     self.uma = param((self.innerdim, self.innerdim),
                      name="uma").init(self.paraminit)
     self.uma2 = param((self.innerdim, self.innerdim),
                       name="uma2").init(self.paraminit)
     self.uif = param((self.innerdim, self.indim),
                      name="uif").init(self.paraminit)
     self.w = param((self.indim, self.innerdim),
                    name="w").init(self.paraminit)
     self.wsf = param((self.indim, self.innerdim),
                      name="wsf").init(self.paraminit)
     self.wmf = param((self.indim, self.innerdim),
                      name="wmf").init(self.paraminit)
     self.wug = param((self.indim, self.innerdim),
                      name="wug").init(self.paraminit)
     self.wwf = param((self.indim, self.innerdim),
                      name="wwf").init(self.paraminit)
     self.wma = param((self.indim, self.innerdim),
                      name="wma").init(self.paraminit)
     self.wma2 = param((self.indim, self.innerdim),
                       name="wma2").init(self.paraminit)
     self.wif = param((self.indim, self.indim),
                      name="wif").init(self.paraminit)
     self.m = param((self.innerdim, self.innerdim),
                    name="m").init(self.paraminit)
     self.msf = param((self.innerdim, self.innerdim),
                      name="msf").init(self.paraminit)
     self.mmf = param((self.innerdim, self.innerdim),
                      name="mmf").init(self.paraminit)
     self.mug = param((self.innerdim, self.innerdim),
                      name="mug").init(self.paraminit)
     self.mwf = param((self.innerdim, self.innerdim),
                      name="mwf").init(self.paraminit)
     self.mma = param((self.innerdim, self.innerdim),
                      name="mma").init(self.paraminit)
     self.mma2 = param((self.innerdim, self.innerdim),
                       name="mma2").init(self.paraminit)
     self.mif = param((self.innerdim, self.indim),
                      name="mif").init(self.paraminit)
     if not self.nobias:
         self.b = param((self.innerdim, ), name="b").init(self.biasinit)
         self.bsf = param((self.innerdim, ), name="bsf").init(self.biasinit)
         self.bmf = param((self.innerdim, ), name="bmf").init(self.biasinit)
         self.bug = param((self.innerdim, ), name="bug").init(self.biasinit)
         self.bwf = param((self.innerdim, ), name="bwf").init(self.biasinit)
         self.bma = param((self.innerdim, ), name="bma").init(self.biasinit)
         self.bma2 = param((self.innerdim, ),
                           name="bma2").init(self.biasinit)
         self.bif = param((self.indim, ), name="bif").init(self.biasinit)
     else:
         self.b, self.bsf, self.bmf, self.bug, self.bwf, self.bma, self.bma2, self.bif = \
             0, 0, 0, 0, 0, 0, 0, 0
Example #31
 def __init__(self, dim, outdim, **kw):
     super(StateToScalar, self).__init__(**kw)
     self.block = Forward(dim, outdim)
     self.agg = param((outdim, ), name="scalartostate_agg").uniform()
Example #32
 def __init__(self, ldim, rdim, **kw):
     super(GenDotDistance, self).__init__(**kw)
     self.W = param((rdim, ldim), name="gendotdist").glorotuniform()
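Given W of shape (rdim, ldim), the generalized dot product this block suggests is the bilinear score r·W·l (the exact argument order is an assumption). In numpy:

    import numpy as np

    ldim, rdim = 20, 30
    W = np.random.randn(rdim, ldim)
    l, r = np.random.randn(ldim), np.random.randn(rdim)
    score = r.dot(W).dot(l)   # scalar similarity between l and r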
Example #33
 def __init__(self, dim, **kw):
     self.dim = dim
     self.w = param((dim,), name="weights").uniform()
     self.b = param((1,), name="bias").uniform()
     super(BinaryLinear, self).__init__(**kw)
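A (dim,) weight vector plus a single bias is the signature of a binary logistic scorer; a minimal sketch of the forward pass these parameters imply (the sigmoid output is an assumption):

    import numpy as np

    def binary_linear(x, w, b):
        # x: (batch, dim), w: (dim,), b: (1,) -> one probability per row
        return 1.0 / (1.0 + np.exp(-(x.dot(w) + b)))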