Example #1
def forward(p, h, x_true, y_true, i):

    i *= 0  # force the layer index to 0, so the first weight slice is always used

    inp = join2(h, x_true)  # concatenate the previous state with the current input

    emb = T.dot(inp, p['W0'])

    h0 = lngru_layer(p,
                     emb, {},
                     prefix='gru1',
                     mask=None,
                     one_step=True,
                     init_state=h[:, :1024],
                     backwards=False)

    h1 = T.nnet.relu(ln(T.dot(h0[0], p['W1'][i])), alpha=0.02)
    h2 = T.nnet.relu(ln(T.dot(h1, p['W2'][i])), alpha=0.02)
    #h2 = h1

    y_est = T.nnet.softmax(T.dot(h2, p['Wy'][i]))

    #h_next = T.dot(h2, p['Wo'][i])
    h_next = h1  # the output projection is disabled; h1 is carried forward

    loss = crossent(y_est, y_true)

    acc = accuracy(y_est, y_true)

    return h_next, y_est, loss, acc, y_est
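
The helpers `join2`, `ln`, and `lngru_layer` come from the surrounding project and are not shown here. A minimal sketch of the two simple ones, assuming `join2` is feature-axis concatenation and `ln` is layer normalization without a learned gain or bias:

import theano.tensor as T

def join2(a, b):
    # Concatenate two (batch, features) matrices along the feature axis.
    return T.concatenate([a, b], axis=1)

def ln(x, eps=1e-5):
    # Layer normalization: standardize each row over its feature axis.
    mu = x.mean(axis=-1, keepdims=True)
    sigma = T.sqrt(x.var(axis=-1, keepdims=True) + eps)
    return (x - mu) / sigma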
Example #2
def network(p, x, true_y):

    x = x.flatten(2)  # flatten each example to a vector, keeping the batch axis

    h1 = lrelu(T.dot(x, p['W1']))
    h2 = lrelu(T.dot(h1, p['W2']))
    y = T.nnet.softmax(T.dot(h2, p['W3']))

    loss = crossent(y, true_y)
    acc = accuracy(y, true_y)

    return loss, acc
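
`lrelu`, `crossent`, and `accuracy` are also project helpers. A plausible minimal implementation, assuming integer class labels and the alpha=0.02 leak used elsewhere on this page:

import theano.tensor as T

def lrelu(x, alpha=0.02):
    # Leaky ReLU with a small negative slope.
    return T.nnet.relu(x, alpha=alpha)

def crossent(p, y):
    # Mean categorical cross-entropy; p is a (batch, classes) softmax
    # output, y a vector of integer labels.
    return T.nnet.categorical_crossentropy(p, y).mean()

def accuracy(p, y):
    # Fraction of examples whose argmax prediction matches the label.
    return T.eq(T.argmax(p, axis=1), y).mean()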
Example #3
def network(params, x, y, p1, p2):

    # Input dropout (inverted, keep probability p1) is kept but disabled:
    #x *= srng.binomial(n=1, p=p1, size=x.shape, dtype='float32').astype('float32') / p1

    h1 = T.nnet.relu(bn(T.dot(bn(x), params['W1']) + params['b1']))
    #h1 *= srng.binomial(n=1, p=p2, size=h1.shape, dtype='float32').astype('float32') / p2
    h2 = T.nnet.relu(bn(T.dot(h1, params['W2']) + params['b2']))
    #h2 *= srng.binomial(n=1, p=p2, size=h2.shape, dtype='float32').astype('float32') / p2
    h3 = bn(T.dot(h2, params['W3']) + params['b3'])

    p = T.nnet.softmax(h3)

    loss = crossent(p, y)
    acc = accuracy(p, y)

    return {'loss': loss, 'p': p, 'acc': acc}
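
`bn` and the commented-out `srng.binomial` lines are this project's batch-norm and inverted-dropout pieces. A sketch under the assumption that `bn` standardizes each feature over the minibatch with no learned gain or bias, with the dropout pattern from the disabled lines factored into a helper:

import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=1234)

def bn(x, eps=1e-5):
    # Batch normalization (no learned parameters in this sketch):
    # standardize each feature over the minibatch axis.
    mu = x.mean(axis=0, keepdims=True)
    sigma = T.sqrt(x.var(axis=0, keepdims=True) + eps)
    return (x - mu) / sigma

def dropout(x, keep_prob):
    # Inverted dropout, as in the commented lines above: zero units with
    # probability 1 - keep_prob, then rescale so the expectation is unchanged.
    mask = srng.binomial(n=1, p=keep_prob, size=x.shape, dtype='float32')
    return x * mask / keep_prob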
Example #4
def forward(p, h, x_true, y_true, i):

    inp = join2(h, x_true)  # concatenate the previous state with the current input

    h1 = T.nnet.relu(ln(T.dot(inp, p['W1'][i])), alpha=0.02)
    #h2 = T.nnet.relu(ln(T.dot(h1, p['W2'])), alpha=0.02)
    h2 = h1  # the second layer is disabled

    y_est = T.nnet.softmax(T.dot(h2, p['Wy'][i]))

    #h_next = T.dot(h2, p['Wo'][i])
    h_next = h1  # the output projection is disabled; h1 is carried forward

    loss = crossent(y_est, y_true)

    acc = accuracy(y_est, y_true)

    return h_next, y_est, loss, acc
Example #5
def forward(p, h, x_true, y_true):

    print("USING LAYER NORM")

    emb = T.dot(x_true, p['w1'])

    h_next1 = lngru_layer(p,
                          emb, {},
                          prefix='gru1',
                          mask=None,
                          one_step=True,
                          init_state=h[:, :512],
                          backwards=False)

    #h_next2 = lngru_layer(p, h_next1[0], {}, prefix='gru2', mask=None,
    #                      one_step=True, init_state=h[:, 512:], backwards=False)

    hout = join2(h_next1[0], x_true)  # skip connection: GRU output plus the raw input

    h2 = T.tanh(ln(T.dot(hout, p['w2']) + p['b2']))

    y_est = T.nnet.softmax(T.dot(h2, p['Wy']) + p['by'])

    loss = nll(y_est, y_true, 2)

    acc = accuracy(y_est, y_true)

    return h_next1[0], y_est, loss, acc
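
With `one_step=True`, this `forward` computes a single timestep, which suggests it is driven by `theano.scan` over a sequence. A hypothetical wiring; `p` is the parameter dict from the example above, while `x_seq` (timesteps, batch, features), `y_seq` (timesteps, batch) and the initial state `h0` are assumed symbolic inputs:

import theano

def step(x_t, y_t, h_prev):
    # One recurrent step; h_next is fed back, loss and acc are collected.
    h_next, y_est, loss, acc = forward(p, h_prev, x_t, y_t)
    return h_next, loss, acc

(h_seq, losses, accs), updates = theano.scan(
    step,
    sequences=[x_seq, y_seq],
    outputs_info=[h0, None, None])

total_loss = losses.mean()  # average the per-step losses over the sequence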
Example #6
File: crf.py Project: tbepler/rnn
def loss(self, Yh, Y):
    return accuracy(Yh, Y, step=self.step)