Example #1
from mlpcode.network import Network
from mlpcode.utils import DATASETS, MODELDIR, loadDataset, normalize
from mlpcode.activation import ActivationFuncs as af
from mlpcode.callbacks import ErrorCallback

useGpu = False
binarized = True
ds = DATASETS.mnist

modelPth = MODELDIR / "bnn_pytorch_1000Ep_PYTOR-7.hdf5"
assert modelPth.exists()

_, _, testX, testY = loadDataset(ds, useGpu=useGpu)
del _  # drop the leftover reference to the training split
testX = normalize(testX, newMin=-1., newMax=1.)
# pList = [0.01, 0.02, 0.05, 0.1, 0.2]

nn = Network.fromModel(modelPth, useGpu=useGpu, binarized=binarized)
nn.compile(hiddenAf=af.sign, outAf=af.softmax)
acc = nn.get_accuracy(testX, testY)
print(f"P: None\tAcc: {acc:0.02f}")

cb = ErrorCallback(0.1, mode=2, bnn=binarized)
nn.addCallbacks(cb)
acc = nn.get_accuracy(testX, testY)
print(f"P: 0.1\tAcc: {acc:0.02f}")

nn.clearCallbacks()

cb = ErrorCallback(0.2, mode=2, bnn=binarized)
nn.addCallbacks(cb)
acc = nn.get_accuracy(testX, testY)
print(f"P: 0.2\tAcc: {acc:0.02f}")
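
Rather than repeating this block for every probability, the commented-out pList above suggests sweeping all five values. A minimal sketch of that sweep, reusing only the calls already shown in this example (the loop itself is an addition):

for p in [0.01, 0.02, 0.05, 0.1, 0.2]:
    nn.clearCallbacks()  # drop the callback from the previous iteration
    nn.addCallbacks(ErrorCallback(p, mode=2, bnn=binarized))
    acc = nn.get_accuracy(testX, testY)
    print(f"P: {p}\tAcc: {acc:0.02f}")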
Example #2
from time import time

from mlpcode.network import Network
from mlpcode.activation import ActivationFuncs as af
from mlpcode.utils import DATASETS, MODELDIR, loadDataset, normalize
# ImageErrorCallback is assumed to live alongside ErrorCallback; adjust if needed
from mlpcode.callbacks import ImageErrorCallback

# Setup reconstructed from the surrounding examples
useGpu = False
binarized = True
ds = DATASETS.mnist

_, _, testX, testY = loadDataset(ds, useGpu=useGpu)
del _  # drop the leftover reference to the training split

st = time()

lst = []
for i in range(8):
    nn = Network.fromModel(MODELDIR / f"{i}.hdf5",
                           useGpu=useGpu,
                           binarized=binarized)
    nn.compile(hiddenAf=af.sign, outAf=af.identity)
    p = 0.14                     # input error probability
    icb = ImageErrorCallback(p)  # returns a perturbed copy of the test images
    runningSum = 0.
    for _ in range(100):
        newX = icb(testX, gpu=useGpu)
        newX = normalize(newX, newMin=-1, newMax=1)
        acc = nn.get_accuracy(newX, testY)
        runningSum += acc
    totalAcc = runningSum / 100
    print(f"p: {p} \tAcc: {totalAcc:0.02f}\t")
    lst.append(totalAcc)

end = time()

print(f'\n\nTotalTime: {end-st:0.5f}')
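
To summarize the per-model averages collected in lst, a small standard-library sketch (this aggregation is an addition, not part of the original run):

from statistics import mean, stdev

print(f"mean acc: {mean(lst):0.02f}\tstd: {stdev(lst):0.02f}")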

# nn = Network.fromModel(modelPth, useGpu=useGpu, binarized=binarized)
# nn.compile(hiddenAf=af.sign, outAf=af.softmax)
# acc = nn.get_accuracy(testX, testY)
# print(f"P: None\tAcc: {acc:0.02f}")
#
Example #3
from mlpcode.network import Network
from mlpcode.activation import ActivationFuncs as af
from mlpcode.utils import DATASETS, loadDataset, MODELDIR, normalize
from mlpcode.callbacks import ErrorCallback

useGpu = False
binarized = True
ds = DATASETS.mnist

modelPath = MODELDIR / "bnn_mnist_1024_1024_weightFlip.hdf5"
assert modelPath.exists()

# nn= Network([784, 256, 10], useGpu=useGpu, useBatchNorm=True)
nn = Network.fromModel(modelPath, useGpu=useGpu, binarized=binarized)

# errorCb = ErrorCallback(3, 0.2, mode=2, bnn=binarized)
# nn.addCallbacks(errorCb, num_layers=1)

nn.compile(hiddenAf=af.sign, outAf=af.identity)

_, _, testX, testY = loadDataset(ds, useGpu=useGpu)
testX = normalize(testX)

acc = nn.get_accuracy(testX, testY)
print(f"{ds}:\t{acc:0.2f} %")
Example #4
import pickle

from mlpcode.activation import ActivationFuncs as af
from mlpcode.loss import LossFuncs as lf
from mlpcode.network import Network
from mlpcode.optim import LRScheduler, LRSchedulerStrat as LRS
from mlpcode.utils import DATASETS, loadDataset, MODELDIR, split_train_valid, normalize

useGpu = True
binarized = True
dataset = DATASETS.mnist
print("Loading {}".format(dataset))
trainX, trainY, testX, testY = loadDataset(dataset, useGpu=useGpu)

trainX = normalize(trainX)
testX = normalize(testX)

trainX, valX, trainY, valY = split_train_valid(trainX, trainY)

print("Finished loading {} data".format(dataset))

layers = [trainX.shape[1], 500, 500, 10]
epochs = 1000
batchSize = 200
lrStart = 0.03
lrEnd = 3e-7
lr = 0.01
# lr = LRScheduler(
#     alpha=lrStart, decay_rate=(lrStart - lrEnd) ** (1 / epochs), strategy=LRS.exp
# )
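
A note on the commented-out schedule: if LRS.exp applies decay_rate multiplicatively once per epoch (an assumption about LRScheduler's internals), the rate that decays from lrStart to lrEnd over the given number of epochs is the ratio raised to 1/epochs, not the difference:

# Hedged sketch: pick decayRate so that lrStart * decayRate**epochs == lrEnd
decayRate = (lrEnd / lrStart) ** (1 / epochs)  # ~0.9886 for these values
# lr = LRScheduler(alpha=lrStart, decay_rate=decayRate, strategy=LRS.exp)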