Code example #1
import jhML
import jhML.layers as L
import jhML.functions as F


# Signature inferred from the call sites below; `activ` (assumed to be F.relu)
# is applied after every layer except the last.
def forward(x, net, activ=F.relu):
    t = jhML.Variable(x, requires_grad=False)
    activated_layers = net[:-1]
    for l in activated_layers:
        t = l(t)
        t = activ(t)
    return net[-1](t)


if __name__ == "__main__":

    x = [[0, 0], [0, 1], [1, 0], [1, 1]]

    gt = [[0], [1], [1], [0]]

    net = [L.Linear(2, 4), L.Linear(4, 8), L.Linear(8, 8), L.Linear(8, 1)]

    num_epoch = int(1e+5)
    for epoch in range(num_epoch):
        clear_grad(net)
        pred = forward(x, net)
        loss = F.mean_squared_error(pred, gt)
        loss.backward()

        if epoch % (num_epoch // 100) == 0:  # integer divisor keeps the progress check exact
            print("%d/%d" % (epoch, num_epoch))
            print(pred)
        update_grad(net, lr=8e-5)

    for num in x:
        pred = forward([num], net)
        print(num, pred)  # assumption: the truncated original presumably prints each final prediction
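
The snippet above relies on clear_grad and update_grad, which are not shown. A minimal sketch of what they could look like, assuming each jhML layer exposes its parameters via params() and each parameter carries .data and .grad arrays (none of this is confirmed by the snippet):

def clear_grad(net):
    # drop the gradients accumulated by the previous backward pass
    for layer in net:
        for p in layer.params():
            p.grad = None


def update_grad(net, lr):
    # plain SGD: move each parameter a small step against its gradient
    for layer in net:
        for p in layer.params():
            p.data -= lr * p.grad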
Code example #2
File: test_binary_number.py  Project: jhson989/jhML
import jhML
import jhML.layers as nn
import jhML.functions as F
import jhML.optimizer as optim

if __name__ == "__main__":

    # 3-bit binary inputs; the trailing comment gives each row's decimal value.
    # Only the last two rows survived truncation; the rest are reconstructed from gt.
    x = [
        [0, 0, 0],  #0
        [1, 1, 0],  #6
        [1, 0, 0],  #4
        [0, 1, 1],  #3
        [0, 1, 0],  #2
        [1, 1, 1],  #7
    ]

    gt = [
        [0],
        [6],
        [4],
        [3],
        [2],
        [7],
    ]

    num_class = 8

    net = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 1000),
                        nn.ReLU(), nn.Linear(1000, 800), nn.ReLU(),
                        nn.Linear(800, 100), nn.ReLU(), nn.Linear(100, 8),
                        nn.ReLU(), nn.Linear(8, num_class)).to_gpu()

    #### learning
    # `optimizer`, not `optim`, so the instance doesn't shadow the imported module
    optimizer = optim.RMSprop(net.params(), lr=1e-4)
    num_epoch = int(1e+4)
    # the inputs never change, so build the Variables once rather than every epoch
    data, label = jhML.Variable(x).to_gpu(), jhML.Variable(gt).to_gpu()
    for epoch in range(num_epoch):
        optimizer.zero_grad()
        pred = net(data)
        loss = F.softmax_cross_entropy(pred, label)
        loss.backward()
        optimizer.step()
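
The loop above trains but never reports its predictions. A minimal evaluation sketch, assuming a jhML Variable exposes its underlying array as .data (an assumption; only jhML.as_cpu is confirmed by the later snippets):

    import numpy as np

    logits = jhML.as_cpu(net(data).data)  # assumption: Variable keeps the raw array in .data
    print(np.argmax(logits, axis=1))      # per-row class; ideally [0, 6, 4, 3, 2, 7], matching gt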
Code example #3
File: test_save.py  Project: jhson989/jhML
import os
import sys
import numpy as np

sys.path.append(
    os.path.dirname(
        os.path.dirname(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
import jhML
import jhML.layers as nn
import jhML.functions as F
import jhML.optimizer as optim
from jhML.tutorial.dataset import MNIST


def get_accuracy(pred, gt):
    # share of predictions that match the ground-truth labels, computed on the CPU
    pred = jhML.as_cpu(pred)
    gt = jhML.as_cpu(gt)
    return np.sum((pred == gt).astype(float)) / len(gt)
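
A usage sketch for get_accuracy: per-class scores are reduced to class ids with argmax before comparison (accuracy_from_logits is a hypothetical helper, not part of the file):

def accuracy_from_logits(logits, labels):
    # hypothetical: logits is an (N, num_class) score array, labels an (N,) int array
    return get_accuracy(np.argmax(logits, axis=1), labels)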


net = nn.Sequential(nn.Linear(28 * 28, 28 * 28), nn.ReLU(),
                    nn.Linear(28 * 28, 1000), nn.ReLU(), nn.Linear(1000, 1000),
                    nn.ReLU(), nn.Linear(1000, 100), nn.ReLU(),
                    nn.Linear(100, 10), nn.ReLU(),
                    nn.Linear(10, MNIST.num_class))

if __name__ == "__main__":

    using_gpu = True
    num_class = 10
    dataset_mnist_train = MNIST(train=True, flatten=True)
    dataset_mnist_test = MNIST(train=False, flatten=True)
    train_dataloader = jhML.Dataloader(dataset_mnist_train,
                                       batch_size=256,
                                       gpu=using_gpu,
                                       drop_last=True)
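
The snippet stops after building the loader. A minimal sketch of one training epoch over it, reusing the optimizer pattern from the earlier examples; the RMSprop choice and the (data, label) batch format are assumptions:

    if using_gpu:
        net = net.to_gpu()  # .to_gpu() on a Sequential is shown in code example #2

    optimizer = optim.RMSprop(net.params(), lr=1e-4)
    for data, label in train_dataloader:  # assumption: batches arrive as (data, label) pairs
        optimizer.zero_grad()
        pred = net(data)
        loss = F.softmax_cross_entropy(pred, label)
        loss.backward()
        optimizer.step()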
Code example #4
import os
import sys
import numpy as np

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
import jhML
import jhML.layers as nn
import jhML.functions as F
import jhML.optimizer as optim
from jhML.tutorial.dataset import MNIST

def get_accuracy(pred, gt):
    pred = jhML.as_cpu(pred)
    gt = jhML.as_cpu(gt)
    return np.sum((pred == gt).astype(float))/len(gt)
    


net = nn.Sequential(
        nn.Linear(28*28, 28*28),
        nn.ReLU(),
        nn.Linear(28*28, 1000),
        nn.ReLU(),
        nn.Linear(1000, 1000),
        nn.ReLU(),
        nn.Linear(1000, 100),
        nn.ReLU(),
        nn.Linear(100, 10),
        nn.ReLU(),
        nn.Linear(10, MNIST.num_class)
            )

if __name__ == "__main__":

    using_gpu = True
Code example #5
import jhML
import jhML.layers as nn
import jhML.functions as F
import jhML.optimizer as optim

if __name__ == "__main__":

    x = [[0, 0],
         [0, 1],
         [1, 0],
         [1, 1]]
    
    gt = [[0],
          [1],
          [1],
          [0]]

    data = jhML.Variable(x)      

    net = nn.Sequential(
        nn.Linear(2, 4),
        nn.ReLU(),
        nn.Linear(4, 8),
        nn.ReLU(),
        nn.Linear(8, 8),
        nn.ReLU(),
        nn.Linear(8, 1)
            )

    #### learning
    optimizer = optim.RMSprop(net.params(), lr=1e-4)  # renamed so the instance doesn't shadow the optim module
    num_epoch = int(1e+5)
    for epoch in range(num_epoch):
        optimizer.zero_grad()
        pred = net(data)
        loss = F.mean_squared_error(pred, gt)
        loss.backward()
        optimizer.step()  # backward/step complete the loop, matching code example #2 (truncated in the original)
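
After training, a minimal readout sketch, again assuming a Variable exposes its array as .data:

    import numpy as np

    out = net(data)
    print(np.round(jhML.as_cpu(out.data)))  # assumption: .data holds the array; ideally [[0], [1], [1], [0]]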