def LSTMCell(xt, h, c):

    # Forget, input, and output gates computed from the concatenated [x_t, h]
    f = edf.Sigmoid(edf.Add(edf.VDot(edf.ConCat(xt, h), Wf), bf))
    i = edf.Sigmoid(edf.Add(edf.VDot(edf.ConCat(xt, h), Wi), bi))
    o = edf.Sigmoid(edf.Add(edf.VDot(edf.ConCat(xt, h), Wo), bo))
    # Candidate cell state
    c_hat = edf.Tanh(edf.Add(edf.VDot(edf.ConCat(xt, h), Wc), bc))
    # Blend old and candidate cell state, then emit the gated output
    c_next = edf.Add(edf.Mul(f, c), edf.Mul(i, c_hat))
    h_next = edf.Mul(o, edf.Tanh(c_next))

    return h_next, c_next
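The cell reads its gate parameters from module-level globals. A minimal sketch of how they might be declared with edf (input_dim and hidden_dim are assumed names; the concatenated input ConCat(xt, h) has width input_dim + hidden_dim):

import numpy as np

# Hypothetical parameter setup assumed by LSTMCell; sizes are illustrative.
input_dim, hidden_dim = 64, 128
in_dim = input_dim + hidden_dim  # width of ConCat(xt, h)

Wf = edf.Param(edf.xavier((in_dim, hidden_dim)))  # forget gate
bf = edf.Param(np.zeros(hidden_dim))
Wi = edf.Param(edf.xavier((in_dim, hidden_dim)))  # input gate
bi = edf.Param(np.zeros(hidden_dim))
Wo = edf.Param(edf.xavier((in_dim, hidden_dim)))  # output gate
bo = edf.Param(np.zeros(hidden_dim))
Wc = edf.Param(edf.xavier((in_dim, hidden_dim)))  # candidate cell state
bc = edf.Param(np.zeros(hidden_dim))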
Example #2
def LSTMCell(xt, h, c, layer):

    # Same update as Example #1, but with per-layer weights for a stacked LSTM
    f = edf.Sigmoid(edf.Add(edf.VDot(edf.ConCat(xt, h), Wf[layer]), bf[layer]))
    i = edf.Sigmoid(edf.Add(edf.VDot(edf.ConCat(xt, h), Wi[layer]), bi[layer]))
    o = edf.Sigmoid(edf.Add(edf.VDot(edf.ConCat(xt, h), Wo[layer]), bo[layer]))
    c_hat = edf.Tanh(edf.Add(edf.VDot(edf.ConCat(xt, h), Wc[layer]), bc[layer]))
    c_next = edf.Add(edf.Mul(f, c), edf.Mul(i, c_hat))
    h_next = edf.Mul(o, edf.Tanh(c_next))

    return h_next, c_next
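Since this variant indexes its weights by layer, stacking is just a loop that feeds each layer's output into the next. A hedged sketch (num_layers, hs, cs are assumed names):

# Hypothetical single time step through a stack of LSTM layers.
def StackedStep(xt, hs, cs, num_layers):
    inp_t = xt
    for layer in range(num_layers):
        hs[layer], cs[layer] = LSTMCell(inp_t, hs[layer], cs[layer], layer)
        inp_t = hs[layer]  # this layer's output feeds the layer above
    return hs, cs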
Example #3
def LSTMCell(x, h, c):
    concat = edf.ConCat(h, x)
    # Forget Gate
    f_gate = edf.Sigmoid(edf.Add(edf.VDot(concat, Wf), bf))

    # Input Gate
    i_gate = edf.Sigmoid(edf.Add(edf.VDot(concat, Wi), bi))

    # Candidate cell state and output gate
    c_temp = edf.Tanh(edf.Add(edf.VDot(concat, Wc), bc))
    o_temp = edf.Sigmoid(edf.Add(edf.VDot(concat, Wo), bo))

    # Output
    c_next = edf.Add(edf.Mul(f_gate, c), edf.Mul(i_gate, c_temp))
    h_next = edf.Mul(o_temp, edf.Tanh(c_next))
    return h_next, c_next
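All three cells implement the standard LSTM update. With [x_t; h_{t-1}] denoting the concatenation built by ConCat (weights multiply on the right, matching VDot) and \odot the elementwise product, they compute:

\begin{aligned}
f_t &= \sigma([x_t; h_{t-1}]\, W_f + b_f) \\
i_t &= \sigma([x_t; h_{t-1}]\, W_i + b_i) \\
o_t &= \sigma([x_t; h_{t-1}]\, W_o + b_o) \\
\tilde{c}_t &= \tanh([x_t; h_{t-1}]\, W_c + b_c) \\
c_t &= f_t \odot c_{t-1} + i_t \odot \tilde{c}_t \\
h_t &= o_t \odot \tanh(c_t)
\end{aligned}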
Example #4
def BuildModel():

    edf.components = []  # reset the global computation graph for this batch

    B, T = inp.value.shape

    score = []
    loss = None

    # Initialize h_0 with the embedding of a fixed start token (index 1)
    vocab_init = np.ones([B])
    vocab_init = edf.Value(vocab_init)
    h = edf.Embed(vocab_init, C2V)
    # Initialize c_0 to zeros
    c = edf.Value(np.zeros([B, hidden_dim]))

    for t in range(T):
        x_t = edf.Value(inp.value[:, t])

        x_t = edf.Embed(x_t, C2V)

        h, c = LSTMCell(x_t, h, c)
        # Score and loss

        pred = edf.SoftMax(edf.VDot(h, V))
        if t != T - 1:
            score.append(pred)
            x_t1 = edf.Value(inp.value[:, t + 1])
        else:
            # no next character at the final step; the target defaults to index 0
            x_t1 = edf.Value(np.zeros(B))
        loss_t = edf.LogLoss(edf.Aref(pred, x_t1))
        if loss is None:
            loss = loss_t
        else:
            loss = edf.Add(loss, loss_t)

    # average the accumulated per-step losses over the batch and the T time steps
    loss = edf.Mul(edf.Mean(loss), edf.Value(np.float64(1) / T))
    return loss, score
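For context, a hedged sketch of how one training step might drive BuildModel. It assumes edf provides Forward(), Backward(), and SGD() helpers, and that inp is an edf.Value holding a (B, T) matrix of character indices; adjust to your version of the framework:

# Hypothetical training step; edf.Forward/Backward/SGD and inp are assumptions.
def train_step(batch, eta=0.1):
    inp.value = batch           # assumed: (B, T) array of character indices
    loss, score = BuildModel()  # rebuilds edf.components for this batch
    edf.Forward()               # forward pass over all components
    edf.Backward(loss)          # backprop from the averaged loss
    edf.SGD(eta)                # gradient step with learning rate eta
    return loss.value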
Example #5
# evaluation bucket
bucket = 100
######################### Simple Convolutional Neural Network Model for CIFAR-10 ################################
##################################################################################################################
# please implement your main CNN model here, as described in the homework; you can mimic the previous code

f1 = edf.Param(edf.xavier((3, 3, prev_channel, 32)))
b1 = edf.Param(np.zeros(32))

f3 = edf.Param(edf.xavier((3, 3, 32, 64)))
b3 = edf.Param(np.zeros(64))

f5 = edf.Param(edf.xavier((1, 1, 64, 10)))
b5 = edf.Param(np.zeros(10))

# 3x3 conv, 32 filters (stride 1, pad 1 assumed), keeps the 32x32 CIFAR input size
layer1 = edf.RELU(edf.Add(Conv(f1, inp, 1, 1), b1))
# 4x4 max pooling: 32x32 -> 8x8
layer2 = MaxPool(layer1, 4)
# 3x3 conv, 64 filters, no padding: 8x8 -> 6x6
layer3 = edf.RELU(edf.Add(Conv(f3, layer2), b3))
# 6x6 average pooling collapses each map to 1x1
layer4 = AvePool(layer3, 6)
# 1x1 conv maps 64 channels to 10 class scores
layer5 = edf.RELU(edf.Add(Conv(f5, layer4), b5))
# flatten the (bucket, 1, 1, 10) activations into (bucket, 10) scores
pred = edf.Reshape(layer5, (bucket, 10))

# the standard classification layer, which you don't need to modify
pred = edf.SoftMax(pred)
loss = edf.Mean(edf.LogLoss(edf.Aref(pred, lab)))
acc = edf.Accuracy(pred, lab)

##################################################################################################################
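Before the framework's own eval_train below, a hedged sketch of how an evaluation pass in buckets of `bucket` images might look; the data names and the reuse of edf.Forward() on the fixed graph above are assumptions:

# Hypothetical evaluation loop; data names and edf.Forward reuse are assumed.
def eval_dataset(images, labels):
    correct = 0.0
    for i in range(0, len(images), bucket):
        inp.value = images[i:i + bucket]   # feed one bucket of inputs
        lab.value = labels[i:i + bucket]
        edf.Forward()                      # recompute pred and acc on this bucket
        correct += acc.value * bucket      # acc.value assumed to be mean accuracy
    return correct / len(images)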


def eval_train():