def testNN2(runs=1,
            width=3,
            data="iris.txt",
            iters=20000,
            std=True,
            trainPct=0.666):
    if data == 'gauss':
        X, y = multimodalData(numModes=4, numPerMode=30)
        # X, y = sklearn.datasets.make_classification()
        XY = np.asarray(np.hstack([X, y.reshape((X.shape[0], 1))]))
    else:
        XY = data_io.read(data)
    nclass = len(set(XY[:, -1]))  # number of classes

    # y has nclass classes (0, ..., nclass-1)
    def unary(yi):
        # one-hot encoding of class index yi
        return [(1 if i == yi else 0) for i in range(nclass)]

    # build a network
    u = width
    nn = modules.Sequential([
        modules.Linear(XY.shape[1] - 1, u),
        modules.Tanh(),
        modules.Linear(u, u),
        modules.Tanh(),
        modules.Linear(u, nclass),
        modules.SoftMax()
    ])
    results = {False: [], True: []}
    for run in range(runs):
        Xtrain, ytrain, Xtest, ytest = splitByClass(XY, trainPct)
        # Map into n softmax outputs
        Ytrain = np.array([unary(yi) for yi in ytrain])
        for rms in (False, True):
            # train the network.
            nn.clean()
            nn.train2(np.asarray(Xtrain),
                      np.asarray(Ytrain),
                      batchsize=1,
                      iters=iters,
                      lrate_decay_step=1000,
                      rms=rms,
                      momentum=(0.9 if rms else None))
            errors = predictionErrors(nn, Xtest, ytest)
            accuracy = 1.0 - (float(errors) / Xtest.shape[0])
            print('RMS', rms, 'Prediction accuracy', accuracy)
            results[rms].append(accuracy)
    print('Results', results)
    print('Average accuracy', 'rms=False', sum(results[False]) / runs,
          'rms=True', sum(results[True]) / runs)
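
# Usage sketch (the argument values below are illustrative, not from the
# source; data='gauss' selects the synthetic multimodal branch above):
testNN2(runs=3, width=3, data='gauss', iters=5000)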
Example #2
    def test_forwardWrongInputListDimension(self):
        # forward() takes a single input tensor; passing two should raise.
        # Convention in these tests: return 0 on pass, 1 on fail.
        m = M.Tanh()

        try:
            m.forward(FloatTensor([[2]]), FloatTensor([[2]]))
        except ValueError:
            return 0

        return 1
Example #3
    def __init__(self):
        super(SimpleCNN, self).__init__()
        # note: this is a custom nn module; keyword names such as rate= and
        # in_dimension= differ from torch.nn's API
        self.conv1 = nn.Conv2d(in_channels=3,
                               out_channels=6,
                               kernel_size=3,
                               stride=3,
                               padding=2)
        self.tanh1 = nn.Tanh()
        self.conv2 = nn.Conv2d(in_channels=6,
                               out_channels=10,
                               kernel_size=3,
                               stride=3,
                               padding=3)
        self.tanh2 = nn.Tanh()
        self.dropout2d = nn.Dropout2d(rate=0.5)
        self.flatten = nn.Flatten()
        self.linear = nn.Linear(in_dimension=360, out_dimension=10)
        self.softmax = nn.Softmax()
        self.set_forward()
Example #4
    def test_backwardBeforeForward(self):
        # backward() needs activations cached by forward(); calling it first
        # should raise. Convention: return 0 on pass, 1 on fail.
        input = FloatTensor([[1, -3, 1], [0, -1, 2]])

        m = M.Tanh()

        try:
            m.backward(input)
        except Exception:
            return 0

        return 1
Example #5
    def test_forwardCorrectOutput(self):
        input = FloatTensor([[1, -3, 1], [0, -1, 2]])
        expected_output = FloatTensor([[0.761594155955765, -0.995054753686730, 0.761594155955765],
                                       [0, -0.761594155955765, 0.964027580075817]])

        m = M.Tanh()
        output = m.forward(input)

        if not areEqual(output, expected_output, tol=1e-5):
            return 1

        return 0
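
# Standalone sanity check of the constants above (illustrative addition,
# standard library only; not part of the original test class):
import math
assert abs(math.tanh(1) - 0.761594155955765) < 1e-12
assert abs(math.tanh(-3) + 0.995054753686730) < 1e-12
assert abs(math.tanh(2) - 0.964027580075817) < 1e-12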
Example #6
    def test_backwardCorrectOutput(self):
        input = FloatTensor([[1, -3, 1], [0, -1, 2]])

        tanh_value = FloatTensor([[0.761594155955765, -0.995054753686730, 0.761594155955765],
                                  [0, -0.761594155955765, 0.964027580075817]])

        # tanh'(x) = 1 - tanh(x)^2, evaluated at the forward pass's inputs
        dsigma = 1 - tanh_value * tanh_value

        m = M.Tanh()
        m.forward(input)
        grad = FloatTensor([[1, 2, 3], [3, 2, 1]])

        output = m.backward(grad)
        output_expected = dsigma * grad

        if not areEqual(output, output_expected):
            return 1

        return 0
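
# Standalone check of the derivative identity used above (illustrative,
# standard library only): compare tanh'(x) = 1 - tanh(x)^2 against a
# central finite difference.
import math
x, h = 1.0, 1e-6
analytic = 1 - math.tanh(x) ** 2
numeric = (math.tanh(x + h) - math.tanh(x - h)) / (2 * h)
assert abs(analytic - numeric) < 1e-9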
Example #7
import tools.data_loader
import tools.model_io
import modules
import shutil
import settings

# user init
batchsize = 10
numbIters = 10000

# load data
X, Y = tools.data_loader.load_data()

# setup neural network
nn = modules.Sequential([
    modules.Linear(settings.nrOfPixels, 4),
    modules.Tanh(),
    modules.Linear(4, 4),
    modules.Tanh(),
    modules.Linear(4, 2),
    modules.SoftMax()
])

# train neural network
nn.train(X['train'],
         Y['train'],
         Xval=X['valid'],
         Yval=Y['valid'],
         batchsize=batchsize,
         iters=numbIters)
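
# A plausible next step (sketch): persist the trained net via the model_io
# helper imported above. The write(model, path) signature is inferred from
# Example #10's model_io.write call; the file name here is hypothetical.
tools.model_io.write(nn, 'nn_trained.txt')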

# determine training name of neural net
Example #8
def testNN(iters=10000, width=3, learning_rates=[0.005], std=True):
    # Test cases
    # X, y = xor()
    # X, y = xor_more()
    X, y = multimodalData(numModes=4)
    # X, y = sklearn.datasets.make_moons(noise=0.25)
    # X, y = sklearn.datasets.make_classification()
    # y has 2 classes (0, 1)

    # Map into 2 softmax outputs
    nclass = len(set(listify(y)))

    def unary(yi):
        return [(1 if i == yi else 0) for i in range(nclass)]

    Y = np.array([unary(yi) for yi in y])
    # build a network
    nd = X.shape[1]  # number of features
    u = width
    nn = modules.Sequential([
        modules.Linear(nd, u),
        modules.Tanh(),
        modules.Linear(u, u),
        modules.Tanh(),
        modules.Linear(u, nclass),
        modules.SoftMax()
    ])

    # train the network.
    errors = []
    for lrate in learning_rates:
        nn.clean()
        # Default does not do learning rate decay or early stopping.
        #print ('X,Y,',X,Y)

        nn.train2(np.asarray(X),
                  np.asarray(Y),
                  batchsize=1,
                  iters=iters,
                  lrate=lrate,
                  lrate_decay_step=100000)
        errors.append((lrate, predictionErrors(nn, X, y)))
    print('Errors for learning rates', errors)

    # Plot the last result
    if nd > 2: return  # only plot two-feature cases
    eps = .1
    xmin = np.min(X[:, 0]) - eps
    xmax = np.max(X[:, 0]) + eps
    ymin = np.min(X[:, 1]) - eps
    ymax = np.max(X[:, 1]) + eps
    ax = tidyPlot(xmin, xmax, ymin, ymax, xlabel='x', ylabel='y')

    def fizz(x1, x2):
        y = nn.forward(np.array([x1, x2]))
        return y[0, 1]  # class 1

    res = 30  # resolution of plot
    ima = np.array([[fizz(xi, yi) for xi in np.linspace(xmin, xmax, res)]
                    for yi in np.linspace(ymin, ymax, res)])
    im = ax.imshow(np.flipud(ima),
                   interpolation='none',
                   extent=[xmin, xmax, ymin, ymax],
                   cmap='viridis' if std else 'jet')
    plt.colorbar(im)
    if std:
        colors = [('r' if label == 0 else 'g') for label in y]
        ax.scatter(X[:, 0],
                   X[:, 1],
                   c=colors,
                   marker='o',
                   s=80,
                   edgecolors='none')
    else:
        pinds = np.where(y == 0)
        ninds = np.where(y == 1)
        plt.plot(X[pinds[0], 0], X[pinds[0], 1], 'ob')
        plt.plot(X[ninds[0], 0], X[ninds[0], 1], 'or')
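
# Usage sketch (values are illustrative, not from the source): sweep a few
# learning rates and plot the decision surface of the last run.
testNN(iters=5000, width=3, learning_rates=[0.001, 0.005, 0.01])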
Example #9
# Data normalization: statistics come from `inputs` (assumed to be the
# training inputs, defined earlier in the original script)
mean, std = inputs.mean(), inputs.std()

train_data.sub_(mean).div_(std)
validation_data.sub_(mean).div_(std)
test_data.sub_(mean).div_(std)

# Instantiate the model

Input_Units = 2
Output_Units = 2
Hidden_Units = 25

model = m.Sequential(m.Linear(Input_Units, Hidden_Units), m.ReLU(),
                     m.Linear(Hidden_Units, Hidden_Units), m.ReLU(),
                     m.Linear(Hidden_Units, Hidden_Units), m.Tanh(),
                     m.Linear(Hidden_Units, Output_Units), m.Tanh())

# Instantiate the optimizer
lr = 0.00095
sgd = m.SGD(params=model.param(), lr=lr)

# Train the model
EPOCHS = 150

model, train_error, validation_error = h.train_model(train_data, train_targets,
                                                     validation_data, validation_targets,
                                                     model, sgd, nb_epochs=EPOCHS)

# Plot both train and validation errors w.r.t. the number of epochs
fig = plt.figure(figsize=(9, 5))
Example #10
train_xor = True
train_mnist = True

if train_xor:
    D, N = 2, 200000

    # this is the XOR problem.
    X = np.random.rand(N, D)  # we want [NxD] data
    X = (X > 0.5) * 1.0
    Y = X[:, 0] == X[:, 1]
    Y = (np.vstack((Y, np.invert(Y))) * 1.0).T  # and [NxC] labels

    X += np.random.randn(N, D) * 0.1  # add some noise to the data.

    # build a network
    nn = modules.Sequential([
        modules.Linear(2, 3), modules.Tanh(),
        modules.Linear(3, 15), modules.Tanh(),
        modules.Linear(15, 15), modules.Tanh(),
        modules.Linear(15, 3), modules.Tanh(),
        modules.Linear(3, 2), modules.SoftMax()
    ])
    # train the network.
    nn.train(X, Y, batchsize=5, iters=1000)
    acc = np.mean(np.argmax(nn.forward(X), axis=1) == np.argmax(Y, axis=1))
    if np is not numpy:  # np may be bound to cupy; convert back to a host scalar
        acc = np.asnumpy(acc)
    print('model train accuracy is: {:0.4f}'.format(acc))

    # save the network
    model_io.write(nn, '../xor_net_small_1000.txt')

if train_mnist:

    Xtrain = data_io.read('../data/MNIST/train_images.npy')
    Ytrain = data_io.read('../data/MNIST/train_labels.npy')
    Xtest = data_io.read('../data/MNIST/test_images.npy')
Example #11
loadData.normalize_data(test_input)


########## modules and model #########

# define optimizers and losses as lists, so we can easily switch between them
optimizers = [optim.SGDOptimizer, optim.SGDmomOptimizer,
              optim.AdamOptimizer, optim.BFGSOptimizer]
losses = [modules.LossMSE]

# define layers and activations
Lin1 = modules.Linear(2, 25)
Lin2 = modules.Linear(25, 25)
Lin3 = modules.Linear(25, 2)
act1 = modules.ReLU()
act2 = modules.ReLU()
act3 = modules.Tanh()
# act4 = modules.Sigmoid()

# combine the layers into a single list
layers = [
    Lin1, act1,
    Lin2, act2,
    Lin3, act3,
]

# set parameters for the run
lr = 0.005  # learning rate; for BFGS, multiply by 10 to 100
epochs = 250  # number of epochs for the run
mini_batch_size = 50  # mini-batch size for the run
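
# A sketch of how these pieces might be wired together. The list-based
# Sequential constructor matches Examples #1 and #7, and the params/lr call
# mirrors Example #9's m.SGD(...) pattern; both signatures are assumptions
# for this library, so the lines stay commented out:
# model = modules.Sequential(layers)
# criterion = losses[0]()
# optimizer = optimizers[0](params=model.param(), lr=lr)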