def testNN2(runs=1,
            width=3,
            data="iris.txt",
            iters=20000,
            std=True,
            trainPct=0.666):
    """Train a small tanh/softmax network and compare plain SGD against
    RMS-style updates over several random train/test splits.

    runs     -- number of random splits to average over
    width    -- hidden-layer width
    data     -- 'gauss' for synthetic multimodal data, else a file read by data_io
    iters    -- training iterations per run
    std      -- unused here; kept for interface compatibility with testNN
    trainPct -- fraction of the data used for training
    """
    if data == 'gauss':
        X, y = multimodalData(numModes=4, numPerMode=30)
        XY = np.asarray(np.hstack([X, y.reshape((X.shape[0], 1))]))
    else:
        XY = data_io.read(data)
    nclass = len(set(XY[:, -1]))  # number of classes

    # y has nclass classes (0, ..., nclass-1): one-hot encode a single label.
    def unary(yi):
        return [(1 if i == yi else 0) for i in range(nclass)]

    # build a network
    u = width
    nn = modules.Sequential([
        modules.Linear(XY.shape[1] - 1, u),
        modules.Tanh(),
        modules.Linear(u, u),
        modules.Tanh(),
        modules.Linear(u, nclass),
        modules.SoftMax()
    ])
    results = {False: [], True: []}
    for run in range(runs):
        Xtrain, ytrain, Xtest, ytest = splitByClass(XY, trainPct)
        # Map labels into nclass softmax outputs
        Ytrain = np.array([unary(yi) for yi in ytrain])
        for rms in (False, True):
            # train the network; clean() resets the weights between settings.
            nn.clean()
            nn.train2(np.asarray(Xtrain),
                      np.asarray(Ytrain),
                      batchsize=1,
                      iters=iters,
                      lrate_decay_step=1000,
                      rms=rms,
                      momentum=(0.9 if rms else None))
            errors = predictionErrors(nn, Xtest, ytest)
            accuracy = 1.0 - (float(errors) / Xtest.shape[0])
            # Single-argument print is valid in both Python 2 and Python 3;
            # the original Py2-only print statements broke under Python 3.
            print('RMS %s Prediction accuracy %s' % (rms, accuracy))
            results[rms].append(accuracy)
    print('Results %s' % (results,))
    # The original ended with a trailing comma, accidentally suppressing the
    # final newline under Python 2.
    print('Average accuracy rms=False %s rms=True %s' %
          (sum(results[False]) / runs, sum(results[True]) / runs))
def testNN(iters=10000, width=3, learning_rates=None, std=True):
    """Train a small tanh/softmax network on synthetic multimodal 2D data for
    each learning rate, print the prediction errors, and plot the class-1
    decision surface of the last trained network (two-feature data only).

    iters          -- training iterations per learning rate
    width          -- hidden-layer width
    learning_rates -- learning rates to try; defaults to [0.005]
    std            -- True: scatter plot with 'viridis'; False: plt.plot with 'jet'
    """
    # Fix for the mutable-default-argument pitfall: the original default
    # `[0.005]` was a single list object shared across all calls.
    if learning_rates is None:
        learning_rates = [0.005]

    # Test cases
    # X, y = xor()
    # X, y = xor_more()
    X, y = multimodalData(numModes=4)
    # X, y = sklearn.datasets.make_moons(noise=0.25)
    # X, y = sklearn.datasets.make_classification()
    # y has 2 classes (0, 1)

    # Map into nclass softmax outputs
    nclass = len(set(listify(y)))

    def unary(yi):
        # One-hot encode a single label.
        return [(1 if i == yi else 0) for i in range(nclass)]

    Y = np.array([unary(yi) for yi in y])
    # build a network
    nd = X.shape[1]  # number of features
    u = width
    nn = modules.Sequential([
        modules.Linear(nd, u),
        modules.Tanh(),
        modules.Linear(u, u),
        modules.Tanh(),
        modules.Linear(u, nclass),
        modules.SoftMax()
    ])

    # train the network once per learning rate.
    errors = []
    for lrate in learning_rates:
        nn.clean()
        # Default does not do learning rate decay or early stopping.
        nn.train2(np.asarray(X),
                  np.asarray(Y),
                  batchsize=1,
                  iters=iters,
                  lrate=lrate,
                  lrate_decay_step=100000)
        errors.append((lrate, predictionErrors(nn, X, y)))
    # Single-argument print works under both Python 2 and 3 (the original
    # Py2-only print statement was a syntax error under Python 3).
    print('Errors for learning rates %s' % (errors,))

    # Plot the last result
    if nd > 2: return  # only plot two-feature cases
    eps = .1
    xmin = np.min(X[:, 0]) - eps
    xmax = np.max(X[:, 0]) + eps
    ymin = np.min(X[:, 1]) - eps
    ymax = np.max(X[:, 1]) + eps
    ax = tidyPlot(xmin, xmax, ymin, ymax, xlabel='x', ylabel='y')

    def fizz(x1, x2):
        # Network softmax output for class 1 at the point (x1, x2).
        y = nn.forward(np.array([x1, x2]))
        return y[0, 1]  # class 1

    res = 30  # resolution of plot
    ima = np.array([[fizz(xi, yi) for xi in np.linspace(xmin, xmax, res)]
                    for yi in np.linspace(ymin, ymax, res)])
    im = ax.imshow(np.flipud(ima),
                   interpolation='none',
                   extent=[xmin, xmax, ymin, ymax],
                   cmap='viridis' if std else 'jet')
    plt.colorbar(im)
    if std:
        colors = [('r' if l == 0 else 'g') for l in y]
        ax.scatter(X[:, 0],
                   X[:, 1],
                   c=colors,
                   marker='o',
                   s=80,
                   edgecolors='none')
    else:
        pinds = np.where(y == 0)
        ninds = np.where(y == 1)
        plt.plot(X[pinds[0], 0], X[pinds[0], 1], 'ob')
        plt.plot(X[ninds[0], 0], X[ninds[0], 1], 'or')
Пример #3
0
import modules
import shutil
import settings
# Fix: this snippet reads tools.data_loader below but never imported `tools`,
# which raises NameError at the load_data() call.
import tools.data_loader

# user init
batchsize = 10
numbIters = 10000

# load data
X, Y = tools.data_loader.load_data()

# setup neural network: 32x32 inputs squeezed through a single NegAbs/Tanh
# unit before a two-way softmax.
nn = modules.Sequential([
    modules.Linear(32**2, 1),
    modules.NegAbs(),
    modules.Tanh(),
    modules.Linear(1, 2),
    modules.SoftMax()
])

# train neural network
nn.train(X['train'],
         Y['train'],
         Xval=X['valid'],
         Yval=Y['valid'],
         batchsize=batchsize,
         iters=numbIters)

# determine training name of neural net
nnName = 'nn_' + nn.name + ('_(batchsize_{}_number_iterations_{})'.format(
    batchsize, numbIters))
Пример #4
0
import tools.model_io
# Fix: import the submodule explicitly — `import tools.model_io` alone does
# not guarantee the `tools.data_loader` attribute used below exists.
import tools.data_loader
import modules
import shutil
import settings

# user init
batchsize = 10
numbIters = 1000

# load data
X, Y = tools.data_loader.load_data()

# setup neural network: a single linear layer straight into a softmax.
nn = modules.Sequential([
    modules.Linear(settings.nrOfPixels, 4),
    # modules.Rect(),
    # modules.Linear(4, 4),
    modules.SoftMax()
])

# train neural network
nn.train(X['train'],
         Y['train'],
         Xval=X['valid'],
         Yval=Y['valid'],
         batchsize=batchsize,
         iters=numbIters)

# determine training name of neural net
nnName = 'nn_' + nn.name + ('_(batchsize_{}_number_iterations_{})'.format(
    batchsize, numbIters))
Пример #5
0
def roar_kar(keep, random=False, train_only=False):
    """Run a ROAR/KAR occlusion experiment on MNIST.

    keep       -- True: KAR (keep the most relevant pixels),
                  False: ROAR (remove them)
    random     -- True: occlude random pixels instead of attribution-ranked ones
    train_only -- True: skip dataset occlusion and retrain on previously
                  generated occluded datasets only

    Returns a dict mapping attribution method -> per-percentile accuracy
    averaged over the repeated training runs.
    """
    logdir = 'tf_logs/standard/'

    def get_savedir():
        # Mirror the log directory under KAR/ or ROAR/, creating it on demand.
        savedir = logdir.replace('tf_logs', 'KAR' if keep else 'ROAR')
        if not os.path.exists(savedir):
            os.makedirs(savedir)
        return savedir

    percentiles = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
    attribution_methods = ['normal', 'LRP', 'proposed_method']

    if not train_only:
        DNN = model_io.read('../models/MNIST/LeNet-5.nn')
        for v in attribution_methods:
            batch_size = 128
            print("{} Step is start".format(v))
            # Fix: the original printed "percentile Remove" for random
            # occlusion and "Random Remove" for ranked occlusion (swapped).
            print("{} {} Remove".format(v, 'Random' if random else 'percentile'))
            # The two original branches were identical apart from the
            # `random` flag, so they are merged here.
            occlude_dataset(DNN=DNN,
                            attribution=v,
                            percentiles=percentiles,
                            random=random,
                            keep=keep,
                            batch_size=batch_size,
                            savedir=get_savedir())
            print("{} : occlude step is done".format(v))
        print("ress record")

    ress = {k: [] for k in attribution_methods}

    # Repeat training 3 times to average out run-to-run variance.
    for trial in range(3):
        for v in attribution_methods:
            res = []
            for p in percentiles:
                occdir = get_savedir() + '{}_{}_{}.pickle'.format('{}', v, p)
                occdir_y = get_savedir() + '{}_{}_{}_{}.pickle'.format(
                    '{}', v, p, 'label')

                data_train = unpickle(occdir.format('train'))
                Xtrain = np.array(data_train)
                Ytrain = np.array(unpickle(occdir_y.format('train')))
                Xtest = data_io.read('../data/MNIST/test_images.npy')
                Ytest = data_io.read('../data/MNIST/test_labels.npy')
                print("check : {}".format(Ytrain.shape))

                # Scale, reshape to NHWC, and pad 28x28 -> 32x32 with -1s.
                Xtest = scale(Xtest)
                Xtest = np.reshape(Xtest, [Xtest.shape[0], 28, 28, 1])
                Xtest = np.pad(Xtest, ((0, 0), (2, 2), (2, 2), (0, 0)),
                               'constant',
                               constant_values=(-1., ))
                # One-hot encode the test labels (labels in column 0).
                Ix = Ytest[:, 0].astype(int)
                Ytest = np.zeros([Xtest.shape[0], np.unique(Ytest).size])
                Ytest[np.arange(Ytest.shape[0]), Ix] = 1
                print(occdir)

                # Fresh LeNet-5-style network for every (method, percentile).
                DNN = modules.Sequential([
                    modules.Convolution(filtersize=(5, 5, 1, 10), stride=(1, 1)),
                    modules.Rect(),
                    modules.SumPool(pool=(2, 2), stride=(2, 2)),
                    modules.Convolution(filtersize=(5, 5, 10, 25), stride=(1, 1)),
                    modules.Rect(),
                    modules.SumPool(pool=(2, 2), stride=(2, 2)),
                    modules.Convolution(filtersize=(4, 4, 25, 100), stride=(1, 1)),
                    modules.Rect(),
                    modules.SumPool(pool=(2, 2), stride=(2, 2)),
                    modules.Convolution(filtersize=(1, 1, 100, 10), stride=(1, 1)),
                    modules.Flatten()
                ])
                print("training...")
                DNN.train(X=Xtrain,
                          Y=Ytrain,
                          Xval=Xtest,
                          Yval=Ytest,
                          iters=10**5,
                          lrate=0.0001,
                          batchsize=128)

                acc = np.mean(
                    np.argmax(DNN.forward(Xtest), axis=1) == np.argmax(Ytest,
                                                                       axis=1))
                del DNN  # free the network before the next configuration
                print('metric model test accuracy is: {:0.4f}'.format(acc))

                res.append(acc)
            print("End of {}:training, accuracy...".format(trial))

            ress[v].append(res)
    print("metric...")
    # Fix: original was `{v: np.mean(v, axis=0) for v in ress.item()}` —
    # `ress.item()` raises AttributeError (dicts have .items()), and it
    # averaged the *key string* instead of the per-trial accuracy lists.
    res_mean = {k: np.mean(v, axis=0) for k, v in ress.items()}

    print(res_mean)

    return res_mean
Пример #6
0
import modules
import shutil
import settings
# Fix: this snippet reads tools.data_loader below but never imported `tools`,
# which raises NameError at the load_data() call.
import tools.data_loader

# user init
batchsize = 10
numbIters = 10000

# load data
X, Y = tools.data_loader.load_data()

# setup neural network: two 16-unit tanh hidden layers, 3-way softmax.
nn = modules.Sequential([
    modules.Linear(settings.nrOfPixels, 16),
    modules.Tanh(),
    modules.Linear(16, 16),
    modules.Tanh(),
    modules.Linear(16, 3),
    modules.SoftMax()
])

# train neural network
nn.train(X['train'],
         Y['train'],
         Xval=X['valid'],
         Yval=Y['valid'],
         batchsize=batchsize,
         iters=numbIters)

# determine training name of neural net
nnName = 'nn_' + nn.name + ('_(batchsize_{}_number_iterations_{})'.format(
    batchsize, numbIters))
Пример #7
0
import tools.model_io
# Fix: import the submodule explicitly — `import tools.model_io` alone does
# not guarantee the `tools.data_loader` attribute used below exists.
import tools.data_loader
import modules
import shutil
import settings

# user init
batchsize = 25
numbIters = 10000

# load data
X, Y = tools.data_loader.load_data()

# setup neural network: 32x32 inputs through a single Rect unit to a softmax.
nn = modules.Sequential([
    modules.Linear(32**2, 1),
    modules.Rect(),
    modules.Linear(1, 2),
    modules.SoftMax()
])

# train neural network
nn.train(X['train'],
         Y['train'],
         Xval=X['valid'],
         Yval=Y['valid'],
         batchsize=batchsize,
         iters=numbIters)

# determine training name of neural net
nnName = 'nn_' + nn.name + ('_(batchsize_{}_number_iterations_{})'.format(
    batchsize, numbIters))
Пример #8
0
import tools.model_io
# Fix: import the submodule explicitly — `import tools.model_io` alone does
# not guarantee the `tools.data_loader` attribute used below exists.
import tools.data_loader
import modules
import shutil
import settings

# user init
batchsize = 25
numbIters = 20000

# load data
X, Y = tools.data_loader.load_data()

# setup neural network; the output width is taken from the label shape.
nn = modules.Sequential([modules.Linear(settings.nrOfPixels, 2),
                         modules.Rect(),
                         modules.Linear(2, Y[settings.kinds[0]].shape[-1]),
                         modules.SoftMax()
                         ])

# train neural network
nn.train(X['train'],
         Y['train'],
         Xval=X['valid'],
         Yval=Y['valid'],
         batchsize=batchsize,
         iters=numbIters)

# determine training name of neural net
nnName = 'nn_' + nn.name + ('_(batchsize_{}_number_iterations_{})'
                            .format(batchsize, numbIters))
Пример #9
0
# Toggle which experiments run.
train_xor = True
train_mnist = True

if train_xor:
    # Problem size: N samples of D binary features.
    D,N = 2,200000

    #this is the XOR problem.
    X = np.random.rand(N,D) #we want [NxD] data
    X = (X > 0.5)*1.0
    Y = X[:,0] == X[:,1]
    Y = (np.vstack((Y, np.invert(Y)))*1.0).T # and [NxC] labels

    X += np.random.randn(N,D)*0.1 # add some noise to the data.

    #build a network: tanh MLP (2-3-15-15-3-2) ending in a softmax.
    nn = modules.Sequential([modules.Linear(2,3), modules.Tanh(),modules.Linear(3,15), modules.Tanh(), modules.Linear(15,15), modules.Tanh(), modules.Linear(15,3), modules.Tanh() ,modules.Linear(3,2), modules.SoftMax()])
    #train the network.
    nn.train(X,Y, batchsize = 5, iters=1000)
    # Accuracy is measured on the training data itself.
    acc = np.mean(np.argmax(nn.forward(X), axis=1) == np.argmax(Y, axis=1))
    if not np == numpy: # np=cupy
        acc = np.asnumpy(acc)
    print('model train accuracy is: {:0.4f}'.format(acc))

    #save the network
    model_io.write(nn, '../xor_net_small_1000.txt')

if train_mnist:

    # NOTE(review): this branch appears truncated in this chunk — only the
    # data loads are visible; the MNIST training presumably continues elsewhere.
    Xtrain = data_io.read('../data/MNIST/train_images.npy')
    Ytrain = data_io.read('../data/MNIST/train_labels.npy')
    Xtest = data_io.read('../data/MNIST/test_images.npy')
Пример #10
0
#Data normalization: standardize every split with shared mean/std statistics.
# NOTE(review): `inputs`, the *_data/*_targets tensors, and the `m`/`h`
# modules are defined elsewhere in the file — verify against the full source.
mean, std = inputs.mean(), inputs.std()

train_data.sub_(mean).div_(std)
validation_data.sub_(mean).div_(std)
test_data.sub_(mean).div_(std)

#Instantiate the model

Input_Units = 2
Output_Units = 2
Hidden_Units = 25

# Three 25-unit hidden layers (ReLU, ReLU, Tanh) with a Tanh output layer.
model = m.Sequential(m.Linear(Input_Units, Hidden_Units), m.ReLU(),
                     m.Linear(Hidden_Units, Hidden_Units), m.ReLU(),
                     m.Linear(Hidden_Units, Hidden_Units), m.Tanh(),
                     m.Linear(Hidden_Units, Output_Units), m.Tanh())

#Instantiate the optimizer
lr = 0.00095
sgd = m.SGD(params=model.param(), lr=lr)

#Train the model
EPOCHS = 150

# NOTE(review): the triple-quoted string opened below is never closed in this
# chunk — the snippet is truncated mid-string.
model, train_error, validation_error = h.train_model(train_data, train_targets,\
                                        validation_data, validation_targets, model, sgd, nb_epochs = EPOCHS)
'''
#Plot both train and validation errors wrt the number of epochs

fig = plt.figure(figsize=(9, 5))
Пример #11
0
# One-hot encode the training labels; the integer class lives in column 0.
I = Ytrain[:, 0].astype(int)
Ytrain = np.zeros([Xtrain.shape[0], np.unique(Ytrain).size])
Ytrain[np.arange(Ytrain.shape[0]), I] = 1

# One-hot encode the test labels the same way.
I = Ytest[:, 0].astype(int)
Ytest = np.zeros([Xtest.shape[0], np.unique(Ytest).size])
Ytest[np.arange(Ytest.shape[0]), I] = 1

# Model a network according to the LeNet-5 architecture:
# three conv+rect+sumpool stages, a 1x1 conv classifier head, then flatten.
lenet = modules.Sequential([
    modules.Convolution(filtersize=(5, 5, 1, 10), stride=(1, 1)),
    modules.Rect(),
    modules.SumPool(pool=(2, 2), stride=(2, 2)),
    modules.Convolution(filtersize=(5, 5, 10, 25), stride=(1, 1)),
    modules.Rect(),
    modules.SumPool(pool=(2, 2), stride=(2, 2)),
    modules.Convolution(filtersize=(4, 4, 25, 100), stride=(1, 1)),
    modules.Rect(),
    modules.SumPool(pool=(2, 2), stride=(2, 2)),
    modules.Convolution(filtersize=(1, 1, 100, 10), stride=(1, 1)),
    modules.Flatten()
])

# Train the network, validating on the test split.
lenet.train(X=Xtrain,
            Y=Ytrain,
            Xval=Xtest,
            Yval=Ytest,
            iters=10**6,
            lrate=0.001,
            batchsize=25)
Пример #12
0
# Assemble the network as an ordered list of alternating linear layers and
# activations.
# NOTE(review): Lin1..Lin3, act1..act3, losses, optimizers, trainAndTest,
# train_input and train_target are defined elsewhere in the file.
layers = [
    Lin1,
    act1,
    Lin2,
    act2,
    Lin3,
    act3]

# set parameters for the run
lr = 0.005 # learning rate, for BFGS multiply by 10 to 100
epochs = 250 # epochs for the run
mini_batch_size = 50 # mini_batch_size for the run

# initialize loss, model and optimizer
loss = losses[0]() # [0] for MSELoss
model = modules.Sequential(layers, loss)
optimizer = optimizers[2](model, lr) # [2] for Adam optimizer

# train the model
print('\n----------------------------------------')
print('          TRAINING THE MODEL          \n\n')
loss_list, train_acc = trainAndTest.train(model, optimizer, loss, train_input, train_target,\
                                          epochs, mini_batch_size, verbose = False)

# print results
print('\n----------------------------------------')
print('          RESULTS          \n')
print('learning rate: ', lr, '  ||  epochs: ', epochs, '  ||  mini_batch_size: ', mini_batch_size)
print('optimizer: ', optimizer.name)
print('minimum training loss: ',min(loss_list))
# Accuracy counts are normalized by the training-set size into a percentage.
print('maximum training accuracy on train: ',max(train_acc)/train_input.size(0)*100,'%')
Пример #13
0
import tools.model_io
# Fix: import the submodule explicitly — `import tools.model_io` alone does
# not guarantee the `tools.data_loader` attribute used below exists.
import tools.data_loader
import modules
import shutil
import settings

# user init
batchsize = 10
numbIters = 100000

# load data
X, Y = tools.data_loader.load_data()

# setup neural network: 32x32 inputs through a 2-unit tanh layer to a softmax.
nn = modules.Sequential([
    modules.Linear(32**2, 2),
    modules.Tanh(),
    modules.Linear(2, 2),
    modules.SoftMax()
])

# train neural network
nn.train(X['train'],
         Y['train'],
         Xval=X['valid'],
         Yval=Y['valid'],
         batchsize=batchsize,
         iters=numbIters)

# determine training name of neural net
nnName = 'nn_' + nn.name + ('_(batchsize_{}_number_iterations_{})'.format(
    batchsize, numbIters))
Пример #14
0
import modules
import shutil
import settings
# Fix: this snippet reads tools.data_loader below but never imported `tools`,
# which raises NameError at the load_data() call.
import tools.data_loader

# user init
batchsize = 10
numbIters = 10000

# load data
X, Y = tools.data_loader.load_data()

# setup neural network: two 4-unit tanh hidden layers, two-way softmax.
nn = modules.Sequential([
    modules.Linear(settings.nrOfPixels, 4),
    modules.Tanh(),
    modules.Linear(4, 4),
    modules.Tanh(),
    modules.Linear(4, 2),
    modules.SoftMax()
])

# train neural network
nn.train(X['train'],
         Y['train'],
         Xval=X['valid'],
         Yval=Y['valid'],
         batchsize=batchsize,
         iters=numbIters)

# determine training name of neural net
nnName = 'nn_' + nn.name + ('_(batchsize_{}_number_iterations_{})'.format(
    batchsize, numbIters))
Пример #15
0
# Convert integer labels to one-hot targets.
# NOTE(review): convert_to_one_hot_labels and the *_input/*_target tensors
# are defined elsewhere in the file.
train_target = convert_to_one_hot_labels(train_input, train_target)
test_target = convert_to_one_hot_labels(test_input, test_target)
# Avoid vanishing gradient
train_input = 0.9 * train_input
# Normalize data with the training-split statistics.
mean, std = train_input.mean(), train_input.std()
train_input.sub_(mean).div_(std)
test_input.sub_(mean).div_(std)

# Define models
# model1: four 25-unit hidden layers with TanH activations, MSE loss.
model1 = modules.Sequential(modules.Linear(2, 25),
                            modules.TanH(),
                            modules.Linear(25, 25),
                            modules.TanH(),
                            modules.Linear(25, 25),
                            modules.TanH(),
                            modules.Linear(25, 25),
                            modules.TanH(),
                            modules.Linear(25, 2),
                            modules.MSELoss()
                           )
# model2: the same topology with ReLU activations.
# NOTE(review): this call is truncated in this chunk — its closing
# parenthesis lies outside the visible lines.
model2 = modules.Sequential(modules.Linear(2, 25),
                            modules.ReLU(),
                            modules.Linear(25, 25),
                            modules.ReLU(),
                            modules.Linear(25, 25),
                            modules.ReLU(),
                            modules.Linear(25, 25),
                            modules.ReLU(),
                            modules.Linear(25, 2),
                            modules.MSELoss()
Пример #16
0
import modules
import shutil
import settings
# Fix: this snippet reads tools.data_loader below but never imported `tools`,
# which raises NameError at the load_data() call.
import tools.data_loader

# user init
batchsize = 10
numbIters = 10000

# load data
X, Y = tools.data_loader.load_data()

# setup neural network: 32x32 inputs through NegAbs + BinStep units to a
# two-way softmax.
nn = modules.Sequential([
    modules.Linear(32**2, 2),
    modules.NegAbs(),
    modules.BinStep(),
    modules.Linear(2, 2),
    modules.SoftMax()
])

# train neural network
nn.train(X['train'],
         Y['train'],
         Xval=X['valid'],
         Yval=Y['valid'],
         batchsize=batchsize,
         iters=numbIters)

# determine training name of neural net
nnName = 'nn_' + nn.name + ('_(batchsize_{}_number_iterations_{})'.format(
    batchsize, numbIters))
Пример #17
0
@version: 1.0
@copyright: Copyright (c)  2015, Sebastian Lapuschkin, Alexander Binder, Gregoire Montavon, Klaus-Robert Mueller
@license : BSD-2-Clause
'''

import modules
import model_io

import numpy as np
na = np.newaxis

# Problem size: N samples of D binary features.
D, N = 2, 200000

# Synthesize the XOR problem: random bits with Gaussian jitter.
X = (np.random.rand(N, D) > 0.5) * 1.0          # [NxD] data in {0, 1}
agree = X[:, 0] == X[:, 1]                      # True where the two bits match
Y = np.vstack((agree, np.invert(agree))).T * 1.0  # [NxC] one-hot labels
X = X + np.random.randn(N, D) * 0.1             # add some noise to the data

# Build a tanh MLP (2-3-15-15-3-2) ending in a softmax.
layer_stack = [
    modules.Linear(2, 3), modules.Tanh(),
    modules.Linear(3, 15), modules.Tanh(),
    modules.Linear(15, 15), modules.Tanh(),
    modules.Linear(15, 3), modules.Tanh(),
    modules.Linear(3, 2), modules.SoftMax(),
]
nn = modules.Sequential(layer_stack)

# Train the network; note it validates on its own training data here.
nn.train(X, Y, Xval=X, Yval=Y, batchsize=5)

# Save the network (disabled).
#model_io.write(nn, '../xor_net_small_1000.txt')