Example #1
import jhML
import jhML.functions as F  # assumed import path for jhML's functional API (relu, linear, ...)


def forward(x, net):
    """Run x through every layer of net, applying ReLU after all but the last."""
    activ = F.relu

    t = jhML.Variable(x, requires_grad=False)  # wrap the raw input; no gradient needed for data
    activated_layers = net[:-1]                # every layer except the output layer
    for layer in activated_layers:
        t = activ(layer(t))                    # linear layer followed by the activation
    return net[-1](t)                          # output layer stays linear (no activation)
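
This forward treats net as a plain sequence of callable layers, so it can be driven without nn.Sequential. A minimal usage sketch, assuming the jhML.layers.Linear objects shown in the later examples are individually callable and that the import paths match those examples:

import numpy as np
import jhML.layers as nn

# hypothetical two-layer net; forward slices it as net[:-1] / net[-1]
net = [nn.Linear(2, 8), nn.Linear(8, 1)]
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
out = forward(x, net)  # ReLU after the hidden layer, raw linear output from the last layer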
Example #2
def forward(x, parameters):  # def line reconstructed; the excerpt starts mid-function
    activ = F.relu
    W1, b1, W2, b2, W3, b3, W4, b4 = parameters
    t = x
    t = activ(F.linear(t, W1, b1))
    t = activ(F.linear(t, W2, b2))
    # t = activ(F.linear(t, W3, b3))  # third hidden layer disabled in the original
    return F.linear(t, W4, b4)


if __name__ == "__main__":

    x = [[0, 0], [0, 1], [1, 0], [1, 1]]

    gt = [[0], [1], [1], [0]]

    W1 = jhML.Variable(np.random.randn(2, 4))
    b1 = jhML.Variable(np.random.randn(4))

    W2 = jhML.Variable(np.random.randn(4, 12))
    b2 = jhML.Variable(np.random.randn(12))

    W3 = jhML.Variable(np.random.randn(12, 12))
    b3 = jhML.Variable(np.random.randn(12))

    W4 = jhML.Variable(np.random.randn(12, 1))
    b4 = jhML.Variable(np.random.randn(1))

    parameters = [W1, b1, W2, b2, W3, b3, W4, b4]

    num_epoch = int(1e+5)
    for epoch in range(num_epoch):
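        # The listing is truncated here; the lines below are a hedged reconstruction
        # of one manual-SGD training step. The loss helper and the .data / .grad /
        # .cleargrad() names on Variable are assumptions not confirmed by these snippets.
        pred = forward(jhML.Variable(x), parameters)
        loss = F.mean_squared_error(pred, jhML.Variable(gt))  # assumed squared-error loss
        loss.backward()
        for p in parameters:
            p.data -= 1e-2 * p.grad  # plain gradient-descent update on each Variable
            p.cleargrad()            # assumed method that resets the stored gradient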
Example #3
        [2],
        [7],
    ]

    num_class = 8
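    # the net appears to classify a 3-bit binary input into one of 8 classes (its decimal value)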

    net = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 1000),
                        nn.ReLU(), nn.Linear(1000, 800), nn.ReLU(),
                        nn.Linear(800, 100), nn.ReLU(), nn.Linear(100, 8),
                        nn.ReLU(), nn.Linear(8, num_class)).to_gpu()

    #### learning
    optimizer = optim.RMSprop(net.params(), lr=1e-4)  # renamed so the optim module is not shadowed
    num_epoch = int(1e+4)
    for epoch in range(num_epoch):
        data, label = jhML.Variable(x).to_gpu(), jhML.Variable(gt).to_gpu()
        optimizer.zero_grad()
        pred = net(data)
        loss = F.softmax_cross_entropy(pred, label)
        loss.backward()
        optimizer.step()

        if epoch % (num_epoch // 100) == 0:  # report progress every 1% of training
            print("%d/%d" % (epoch, num_epoch))
            print(F.argmax(pred))

    for num in x:
        data = jhML.Variable(num).to_gpu()
        pred = net(data).to_cpu()
        print("0b%d%d%d = %d" %
              (num[0], num[1], num[2], F.argmax(pred, axis=0)))
Example #4
import jhML
import jhML.layers as nn
import jhML.optimizer as optim
import jhML.functions as F  # assumed import path for jhML's functional API (used for the loss below)

if __name__ == "__main__":

    x = [[0, 0],
         [0, 1],
         [1, 0],
         [1, 1]]
    
    gt = [[0],
          [1],
          [1],
          [0]]

    data = jhML.Variable(x)  # wrap the XOR inputs once; reused every epoch

    net = nn.Sequential(
        nn.Linear(2, 4),
        nn.ReLU(),
        nn.Linear(4, 8),
        nn.ReLU(),
        nn.Linear(8, 8),
        nn.ReLU(),
        nn.Linear(8, 1)
    )

    #### learning
    optimizer = optim.RMSprop(net.params(), lr=1e-4)  # renamed so the optim module is not shadowed
    num_epoch = int(1e+5)
    for epoch in range(num_epoch):
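        # The listing ends here; the lines below reconstruct one training step
        # following the pattern of Example #3. F.mean_squared_error is an assumed
        # name for a squared-error loss and does not appear anywhere in the excerpt.
        label = jhML.Variable(gt)
        optimizer.zero_grad()                      # clear accumulated gradients
        pred = net(data)                           # forward pass over the four XOR rows
        loss = F.mean_squared_error(pred, label)   # assumed loss helper
        loss.backward()                            # backpropagate through the Sequential net
        optimizer.step()                           # RMSprop update of net.params()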