Example #1
from bijou.metrics import accuracy
from bijou.learner import Learner
from bijou.data import Dataset, DataLoader, DataBunch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from bijou.datasets import mnist
import matplotlib.pyplot as plt

x_train, y_train, x_valid, y_valid, x_test, y_test = mnist()
train_ds, valid_ds, test_ds = Dataset(x_train, y_train), Dataset(
    x_valid, y_valid), Dataset(x_test, y_test)
bs = 128
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=bs)
test_dl = DataLoader(test_ds, batch_size=bs)
data = DataBunch(train_dl, valid_dl)

in_dim = data.train_ds.x.shape[1]
h_dim = 128
model = nn.Sequential(nn.Linear(in_dim, h_dim), nn.ReLU(),
                      nn.Linear(h_dim, 10))
opt = optim.SGD(model.parameters(), lr=0.35)

learner = Learner(model, opt, F.cross_entropy, data, metrics=[accuracy])

learner.fit_one_cycle(5, high_lr=0.35)

learner.recorder.plot()
plt.show()
Example #2
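# NOTE: this excerpt starts mid-class. Based on Example #5 below, which uses
# the same forward(), the missing head presumably looks like this (GCNConv
# would come from torch_geometric.nn; conv1's hidden size of 16 is an
# assumption inferred from conv2):
class Model(nn.Module):
    def __init__(self, feature_num, class_num):
        super().__init__()
        self.conv1 = GCNConv(feature_num, 16)
        self.conv2 = GCNConv(16, class_num)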
    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = self.conv2(x, edge_index)
        outputs = F.relu(x)
        return outputs


model = Model(dataset.num_node_features, dataset.num_classes)
opt = optim.SGD(model.parameters(), lr=0.5, weight_decay=0.01)

learner = Learner(model,
                  opt,
                  masked_cross_entropy,
                  data,
                  metrics=[masked_accuracy],
                  callbacks=PyGGraphInterpreter)

learner.fit(100)
learner.test(test_dl)
learner.predict(test_dl)


def loss_noreduction(pred, target):
    return F.cross_entropy(pred[target.mask],
                           target.data[target.mask],
                           reduction='none')


scores, xs, ys, preds, indices = learner.interpreter.top_data(
    loss_noreduction, k=10, phase='train', largest=True)
Example #3
bs = 128
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=bs)
test_dl = DataLoader(test_ds, batch_size=bs)
data = DataBunch(train_dl, valid_dl)

in_dim = data.train_ds.x.shape[1]
h_dim = 128
model = nn.Sequential(nn.Linear(in_dim, h_dim), nn.ReLU(),
                      nn.Linear(h_dim, 10))
opt = optim.SGD(model.parameters(), lr=0.35)

loss_func = F.cross_entropy
learner = Learner(model,
                  opt,
                  loss_func,
                  data,
                  metrics=[accuracy],
                  callbacks=Interpreter())

learner.fit(3)
learner.test(test_dl)


def loss_noreduction(pred, target):
    return F.cross_entropy(pred, target, reduction='none')


scores, xs, ys, preds, indices = learner.interpreter.top_data(
    metric=loss_noreduction, k=10, phase='train', largest=True)
print(scores)
print(indices)
Example #4
train_ds, valid_ds, test_ds = Dataset(x_train, y_train), Dataset(x_valid, y_valid), Dataset(x_test, y_test)
bs = 128
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=bs)
test_dl = DataLoader(test_ds, batch_size=bs)
data = DataBunch(train_dl, valid_dl)


in_dim = data.train_ds.x.shape[1]
h_dim = 128
model = nn.Sequential(nn.Linear(in_dim, h_dim), nn.ReLU(), nn.Linear(h_dim, 10))
opt = optim.SGD(model.parameters(), lr=0.35)


loss_func = F.cross_entropy
learner = Learner(model, opt, loss_func, data, metrics=[accuracy], callbacks=GradientClipping(0.001))

learner.fit(3)
learner.test(test_dl)
pred = learner.predict(x_valid)

print(pred.size())

learner.recorder.plot_loss()
learner.recorder.plot_metrics()
plt.show()



Example #5
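# NOTE: this excerpt starts mid-__init__; the missing head is presumably
# something like this (conv1's hidden size of 16 is inferred from conv2):
class Model(nn.Module):
    def __init__(self, feature_num, class_num):
        super().__init__()
        self.conv1 = GCNConv(feature_num, 16)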
        self.conv2 = GCNConv(16, class_num)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = self.conv2(x, edge_index)
        outputs = F.relu(x)
        return outputs

model = Model(dataset.num_node_features, dataset.num_classes)
opt = optim.SGD(model.parameters(), lr=0.5, weight_decay=0.01)


# 3. learner
learner = Learner(model, opt, masked_cross_entropy, data, metrics=[masked_accuracy])

# 4. fit
learner.fit(100)

# 5. test
learner.test(test_data)

# 6. predict
pred = learner.predict(dataset[0])
print(pred.size())

# 7. plot
learner.recorder.plot_metrics()
plt.show()
Example #6
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.gcn1 = GCN(1433, 16, F.relu)
        self.gcn2 = GCN(16, 7, None)

    def forward(self, g, features):
        x = self.gcn1(g, features)
        x = self.gcn2(g, x)
        return x

net = Net()
optimizer = th.optim.Adam(net.parameters(), lr=1e-3)


# 3. learner
learner = Learner(net, optimizer, masked_cross_entropy, data, metrics=masked_accuracy)

# 4. fit
learner.fit(50)

# 5. test
learner.test(test_dl)

# 6. predict
learner.predict(test_dl)

# 7. plot
learner.recorder.plot_metrics()
plt.show()
Example #7
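# NOTE: this excerpt starts inside nn.Sequential. Judging from the shape
# comments below (14 -> 7 -> 4 -> 2 after stride-2 convs on 28x28 MNIST
# images), the model presumably begins like this; the first conv layer and
# dp.mnist_resize are assumptions:
def cnn_model(out_dim):
    return nn.Sequential(
        Lambda(dp.mnist_resize),  # reshape flat 784-vectors to 1x28x28
        nn.Conv2d(1, 8, 5, padding=2, stride=2),
        nn.ReLU(),  # 14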
        nn.Conv2d(8, 16, 3, padding=1, stride=2),
        nn.ReLU(),  # 7
        nn.Conv2d(16, 32, 3, padding=1, stride=2),
        nn.ReLU(),  # 4
        nn.Conv2d(32, 32, 3, padding=1, stride=2),
        nn.ReLU(),  # 2
        nn.AdaptiveAvgPool2d(1),
        Lambda(dp.flatten),
        nn.Linear(32, out_dim))


x_train, y_train, x_valid, y_valid, x_test, y_test = mnist()
x_train, x_valid, x_test = dp.normalize_to(x_train, x_valid, x_test)
train_ds, valid_ds, test_ds = Dataset(x_train, y_train), Dataset(
    x_valid, y_valid), Dataset(x_test, y_test)
bs = 512
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=bs)
test_dl = DataLoader(test_ds, batch_size=bs)
data = DataBunch(train_dl, valid_dl)

model = cnn_model(10)
opt = optim.Adam(model.parameters(), lr=0.005)

loss_func = F.cross_entropy
learner = Learner(model, opt, loss_func, data, metrics=accuracy)

learner.fit(5)

plt.show()
Example #8
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=bs)
test_dl = DataLoader(test_ds, batch_size=bs)
data = DataBunch(train_dl, valid_dl)

in_dim = data.train_ds.x.shape[1]
h_dim = 128
model = nn.Sequential(nn.Linear(in_dim, h_dim), nn.ReLU(),
                      nn.Linear(h_dim, 10))
opt = optim.SGD(model.parameters(), lr=0.35)

loss_func = F.cross_entropy
cbks = Checkpoints(3)  # save a checkpoint every 3 epochs
learner = Learner(model,
                  opt,
                  loss_func,
                  data,
                  metrics=[accuracy],
                  callbacks=cbks)

learner.fit(3)
learner.load_checkpoint()  # load the latest checkpoint
learner.fit(2)  # continue training

learner.test(test_dl)
pred = learner.predict(x_valid)
print(pred.size())

learner.recorder.plot_loss()
plt.show()
Example #9
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.gcn1 = GCN(1433, 16, F.relu)
        self.gcn2 = GCN(16, 7, None)

    def forward(self, g, features):
        x = self.gcn1(g, features)
        x = self.gcn2(g, x)
        return x

net = Net()
optimizer = th.optim.Adam(net.parameters(), lr=1e-3)


# 3. learner
learner = Learner(net, optimizer, masked_cross_entropy, data, metrics=masked_accuracy, callbacks=DGLGraphInterpreter)

# 4. fit
learner.fit(50)

# 5. test
learner.test(test_dl)


def loss_noreduction(pred, target):
    return F.cross_entropy(pred[target.mask], target.data[target.mask], reduction='none')


scores, xs, ys, preds, indices = learner.interpreter.top_data(loss_noreduction, k=10, phase='train', largest=True)
learner.interpreter.plot_confusion(phase='train')
learner.interpreter.plot_confusion(phase='val')
Example #10
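# NOTE: this excerpt starts mid-forward. Based on Example #16 below, which
# shares this body, the surrounding loop presumably looks like this
# (unpacking data.batch is an assumption for batched PyG graphs):
class Model(nn.Module):
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        for gcn in self.gcns:
            x = gcn(x, edge_index)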
            x = F.relu(x)
        x, _, _, batch, _, _ = self.graph_pooling(x, edge_index, None, batch)
        x = global_max_pool(x, batch)
        outputs = self.dense(x)
        outputs = F.relu(outputs)
        outputs = self.out(outputs)
        return outputs


model = Model(dataset.item_num, 2)
opt = optim.SGD(model.parameters(), lr=0.5)

# 3. learner
learner = Learner(model,
                  opt,
                  F.cross_entropy,
                  train_db,
                  metrics=[accuracy],
                  callbacks=PyGGraphInterpreter())

# 4. fit
learner.fit(3)

# 5. test
learner.test(test_dl)

loss = nn.CrossEntropyLoss(reduction='none')
scores, xs, ys, preds, indices = learner.interpreter.top_data(loss,
                                                              k=10,
                                                              phase='train',
                                                              largest=True)
Example #11
train_ds, valid_ds, test_ds = Dataset(x_train, y_train), Dataset(
    x_valid, y_valid), Dataset(x_test, y_test)
bs = 128
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=bs)
test_dl = DataLoader(test_ds, batch_size=bs)
data = DataBunch(train_dl, valid_dl)

in_dim = data.train_ds.x.shape[1]
h_dim = 128
model = nn.Sequential(nn.Linear(in_dim, h_dim), nn.ReLU(),
                      nn.Linear(h_dim, 10))
opt = optim.SGD(model.parameters(), lr=0.35)

cbs = [
    LayerAnalysisCallback(
        forward=True),  # show output analysis of each single layer
    LayerAnalysisCallback(
        forward=False)  # show gradient analysis of each single layer
]
loss_func = F.cross_entropy
learner = Learner(model,
                  opt,
                  loss_func,
                  data,
                  metrics=[accuracy],
                  callbacks=cbs)

learner.fit(3)

plt.show()
Example #12
    def forward(self, g):
        # For undirected graphs, in_degree is the same as out_degree.
        h = g.in_degrees().view(-1, 1).float()
        for conv in self.layers:
            h = conv(g, h)
        g.ndata['h'] = h
        hg = dgl.mean_nodes(g, 'h')
        return self.classify(hg)


model = Classifier(1, 256, train_ds.num_classes)
optimizer = optim.Adam(model.parameters(), lr=0.001)

# 3. learner
loss_func = nn.CrossEntropyLoss()
learner = Learner(model,
                  optimizer,
                  loss_func,
                  data,
                  metrics=accuracy,
                  callbacks=DGLInterpreter)

# 4. fit
learner.fit(80)

# 5. test
learner.test(test_dl)

loss = nn.CrossEntropyLoss(reduction='none')
scores, xs, ys, preds, indices = learner.interpreter.top_data(loss,
                                                              k=10,
                                                              phase='train',
                                                              largest=True)
Example #13
import torch.nn.functional as F
import torch.nn as nn
from torch import optim
import matplotlib.pyplot as plt
from bijou.learner import Learner
from bijou.data import Dataset, DataLoader, DataBunch
from bijou.datasets import mnist

x_train, y_train, x_valid, y_valid, x_test, y_test = mnist()
train_ds, valid_ds, test_ds = Dataset(x_train, y_train), Dataset(
    x_valid, y_valid), Dataset(x_test, y_test)
bs = 128
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=bs)
test_dl = DataLoader(test_ds, batch_size=bs)
data = DataBunch(train_dl, valid_dl)

in_dim = data.train_ds.x.shape[1]
h_dim = 128
model = nn.Sequential(nn.Linear(in_dim, h_dim), nn.ReLU(),
                      nn.Linear(h_dim, 10))
opt = optim.SGD(model.parameters(), lr=0.35)

loss_func = F.cross_entropy
learner = Learner(model, opt, loss_func, data)

learner.find_lr(max_iter=100)

plt.show()
Example #14
bs = 128
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=bs)
test_dl = DataLoader(test_ds, batch_size=bs)
data = DataBunch(train_dl, valid_dl)

in_dim = data.train_ds.x.shape[1]
h_dim = 128
model = nn.Sequential(nn.Linear(in_dim, h_dim), nn.ReLU(),
                      nn.Linear(h_dim, 10))
opt = optim.SGD(model.parameters(), lr=1.5)

loss_func = F.cross_entropy
learner = Learner(model,
                  opt,
                  loss_func,
                  data,
                  metrics=[accuracy],
                  callbacks=EarlyStopping(patience=3))

learner.fit(10)
learner.test(test_dl)
pred = learner.predict(x_valid)

print(pred.size())

learner.recorder.plot_loss()
learner.recorder.plot_metrics()
plt.show()

# # 1. ------ data
# x_train, y_train, x_valid, y_valid = mnist_data()
Example #15
File: 01-simple.py Project: hitlic/bijou
import matplotlib.pyplot as plt

x_train, y_train, x_valid, y_valid, x_test, y_test = mnist()
train_ds, valid_ds, test_ds = Dataset(x_train, y_train), Dataset(
    x_valid, y_valid), Dataset(x_test, y_test)
bs = 128
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=bs)
test_dl = DataLoader(test_ds, batch_size=bs)
# train_dl, valid_dl, test_dl = DataLoader.loaders(train_ds, valid_ds, test_ds, 128)
data = DataBunch(train_dl, valid_dl)

in_dim = data.train_ds.x.shape[1]
h_dim = 128
model = nn.Sequential(nn.Linear(in_dim, h_dim), nn.ReLU(),
                      nn.Linear(h_dim, 10))
opt = optim.SGD(model.parameters(), lr=0.35)

loss_func = F.cross_entropy
learner = Learner(model, opt, loss_func, data, metrics=[accuracy])

learner.fit(3)
learner.test(test_dl)
pred = learner.predict(x_valid)

print(pred.size())

learner.recorder.plot_loss()
learner.recorder.plot_metrics()
plt.show()
Example #16
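# NOTE: this excerpt starts mid-forward; the method presumably opens like
# this (unpacking data.batch is an assumption for batched PyG graphs):
class Model(nn.Module):
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch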
        for gcn in self.gcns:
            x = gcn(x, edge_index)
            x = F.relu(x)
        x, _, _, batch, _, _ = self.graph_pooling(x, edge_index, None, batch)
        x = global_max_pool(x, batch)
        outputs = self.dense(x)
        outputs = F.relu(outputs)
        outputs = self.out(outputs)
        return outputs

model = Model(dataset.item_num, 2)
opt = optim.SGD(model.parameters(), lr=0.5)


# 3. learner
learner = Learner(model, opt, F.cross_entropy, train_db, metrics=[accuracy])

# 4. fit
learner.fit(3)

# 5. test
learner.test(test_dl)

# 6. predict
pred = learner.predict(test_dl)
print(pred.size())

# 7. plot
learner.recorder.plot_metrics()
plt.show()
Example #17
    def forward(self, g):
        # For undirected graphs, in_degree is the same as
        # out_degree.
        h = g.in_degrees().view(-1, 1).float()
        for conv in self.layers:
            h = conv(g, h)
        g.ndata['h'] = h
        hg = dgl.mean_nodes(g, 'h')
        return self.classify(hg)


model = Classifier(1, 256, train_ds.num_classes)
optimizer = optim.Adam(model.parameters(), lr=0.001)

# 3. learner
loss_func = nn.CrossEntropyLoss()
learner = Learner(model, optimizer, loss_func, data, metrics=accuracy)

# 4. fit
learner.fit(80)

# 5. test
learner.test(test_dl)

# 6. predict
learner.predict(test_dl)

# 7. plot
learner.recorder.plot_metrics()
plt.show()