Example 1
def NeuralNetwork(X, z, test=False):
    """Wrapper for a neural network. Trains a neural network using X and z.

    Args:
        X (np.ndarray): Input data the network is to be trained on.
        z (np.ndarray): Response data the network is to be trained against.
        test (bool, optional): If True, searches a hard-coded parameter
                               space for optimal parameters instead of
                               training a single network. Defaults to False.

    Returns:
        (float, list): (score reached, [testing set prediction, testing set])
    """
    if not test:
        hiddenLayers = 2
        hiddenNeurons = 64
        epochN = 500
        minibatchSize = 32
        eta = (None, 1e-03)
        lmbd = 1e-06
        alpha = 1e-00
        activationFunction = sigmoid
        outputFunction = softMax

        Xtr, Xte, ztr, zte = train_test_split(X, z)

        network = NN(hiddenNN=hiddenNeurons, hiddenLN=hiddenLayers)
        network.giveInput(Xtr, ztr)
        network.giveParameters(epochN=epochN,
                               minibatchSize=minibatchSize,
                               eta=etaDefinerDefiner(eta[0], eta[1]),
                               lmbd=lmbd,
                               alpha=alpha,
                               activationFunction=activationFunction,
                               outputFunction=outputFunction)
        network.train(splitData=False)

        network.predict(Xte, zte)

        return network.score, [network.predictedLabel, zte]

    else:
        # Parameter space to benchmark over via benchmarkNN
        parameters = {
            "hiddenLN": [0, 1, 2, 4],
            "hiddenNN": [16, 32, 64, 128, 256],
            "epochN": [500],
            "minibatchSize": [32, 64],
            "eta": [[j, i**k] for i in np.logspace(0, 6, 7)
                    for j, k in [(1, 1), (None, -1)]],
            "lmbd": np.logspace(-1, -6, 3),
            "alpha": np.logspace(-0, -1, 1),
            "activationFunction": [sigmoid, ReLU_leaky, ReLU],
            "outputFunction": [softMax],
            "#repetitions": 5,
            "datafraction": 1
        }

        optimalScore, optimalParams, optimalParamSTR = benchmarkNN(
            X,
            z,
            parameters,
            NN,
            mode="classification",
            randomSearch=False,
            writingPermissions=False,
            N=int(1e3))
        print("Optimal Neural Network parameters:",
              optimalScore,
              optimalParamSTR,
              sep="\n",
              end="\n\n")
Example 2
train_xs, train_ys = xs[train_indices], ys[train_indices]
test_xs, test_ys = xs[test_indices], ys[test_indices]

epochs = 100

# Standard BP
print("### Standard BP ###")
nn = NN([xs.shape[1], 8, len(set(ys))], ["relu", "softmax"],
        lr_init=0.05,
        regularization="L2")
stdBP_loss = []
start = time.time()
for epoch in tqdm(range(epochs)):
    this_epoch_losses = []
    for sample_xs, sample_ys in zip(train_xs, train_ys):
        nn.train(sample_xs.reshape(1, -1), sample_ys.reshape(-1))
        this_epoch_losses.append(nn.loss)
    stdBP_loss.append(np.mean(this_epoch_losses))
end = time.time()
stdBP_time = end - start
stdBP_acc = np.mean(np.argmax(nn.forward(test_xs), axis=-1) == test_ys)

# Accumulated BP
print("\n### Accumulated BP ###")
nn.reset()
acmlBP_loss = []
start = time.time()
for epoch in tqdm(range(epochs)):
    nn.train(train_xs, train_ys)
    acmlBP_loss.append(nn.loss)
end = time.time()
acmlBP_time = end - start
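A simple way to compare the two regimes is to plot the per-epoch losses collected above; this is a sketch that assumes matplotlib, which the snippet itself does not import:

import matplotlib.pyplot as plt

plt.plot(stdBP_loss, label="Standard BP (per-sample updates)")
plt.plot(acmlBP_loss, label="Accumulated BP (full-batch updates)")
plt.xlabel("Epoch")
plt.ylabel("Mean training loss")
plt.legend()
plt.show()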
Example 3
from NeuralNetwork import NN
import numpy as np


o = NN(0.1, 2000)
Xtr = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]).T  # XOR inputs, one sample per column
Ytr = np.array([[0, 1, 1, 0]])                      # XOR targets
layerInfo = [[2, 'tanh'], [1, 'sigmoid']]           # [units, activation] per layer
o.train(Xtr, Ytr, layerInfo, 'Log')
print('Log ->\n', o.test(Xtr, layerInfo))
o.train(Xtr, Ytr, layerInfo, 'Quadratic')
print('Quadratic ->\n', o.test(Xtr, layerInfo))
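A quick sanity check, assuming o.test returns the raw network outputs as an array with the same shape as Ytr:

preds = o.test(Xtr, layerInfo)
print('Rounded predictions:', np.round(preds))  # a well-trained net should reproduce [[0, 1, 1, 0]]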
Example 4
# Linear SVM
print("\nTesting SVM with linear kernel...")
Linear_SVM = SVM(xs.shape[1], func='Linear')
Linear_SVM.fit(train_xs, train_ys_for_svm, C=100, epsilon=0.01, iters=10000)
Linear_svm_acc = np.mean(Linear_SVM.predict(test_xs) == test_ys_for_svm)

# Gaussian SVM
print("\nTesting SVM with Gaussian kernel...")
Gaussian_SVM = SVM(xs.shape[1], func='Gaussian', sigma=0.1)
Gaussian_SVM.fit(train_xs, train_ys_for_svm, C=1, epsilon=0.01, iters=100)
Gaussian_svm_acc = np.mean(Gaussian_SVM.predict(test_xs) == test_ys_for_svm)

# Neural Network
print("\nTesting Neural Network...")
nn = NN([xs.shape[1], 64, len(set(ys))], ["relu", "softmax"],
        lr_init=0.01,
        regularization="L2",
        regularization_lambda=0.1)
for epoch in tqdm(range(100)):
    nn.train(train_xs, train_ys)
nn_acc = np.mean(np.argmax(nn.forward(test_xs), axis=-1) == test_ys)

# Decision Tree
print("\nTesting Decision Tree...")
decisionTree = DecisionTree(train_xs, train_ys, test_xs, test_ys, attributes, isdiscs, labels)
decisionTree.buildTree(partIndex='InformationGain', prepruning=True)
decisionTree_acc = decisionTree.test(test_xs, test_ys)

# Demo
print("\nTest Accuracy:")
print("- Linear SVM      :    %.2f"%(Linear_svm_acc*100)+"%")
print("- Gaussian SVM    :    %.2f"%(Gaussian_svm_acc*100)+"%")
print("- Neural Network  :    %.2f"%(nn_acc*100)+"%")
print("- Decision Tree   :    %.2f"%(decisionTree_acc*100)+"%")
Example 5
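# The snippet assumes `table` is an xlrd worksheet opened earlier; a plausible setup
# (the workbook path here is hypothetical) would be:
import numpy as np
import matplotlib.pyplot as plt
import xlrd

workbook = xlrd.open_workbook('watermelon.xls')
table = workbook.sheet_by_index(0)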
dataset = []
for i in range(table.nrows):
    line = table.row_values(i)
    dataset.append(line)
dataset = np.array(dataset)

xs = dataset[1:, 1:-1].astype(np.float64)
ys = (dataset[1:, -1] == '是').astype(np.int32)  # '是' ("yes") marks the positive class

# train a neural network on the watermelon dataset
nn = NN([xs.shape[1], 16, len(set(ys))], ["sigmoid", "softmax"],
        lr_init=0.1,
        regularization=None)
for batch_idx in range(50000):
    nn.train(xs, ys)
    if batch_idx % 100 == 0:
        print("Loss = %.4f" % nn.loss)

# calculate accuracy (on the training data; no held-out split here)
preds = nn.forward(xs)
preds = np.argmax(preds, axis=-1)
print("Accuracy: %.4f" % np.mean(preds == ys))

# plot data
positive_xs = xs[ys == 1]
negative_xs = xs[ys == 0]
plt.scatter(positive_xs[:, 0],
            positive_xs[:, 1],
            c='#00CED1',
            s=60,
Example 6
  data = readDatabase('DB_TALOS_186_secStruct_nn')
  
  print 'Making training set'
  trainingSet = makeTrainingDataMissingShift(data)
  
  nIn  = len(trainingSet[0][0])
  nOut = len(trainingSet[0][1])
  nHid = 3
  
  print "Inputs", nIn
  print "Hidden", nHid
  print "Output", nOut

  nn = NN(nIn, nHid, nOut, testFuncMissingShift)

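  # N and M are presumably the learning rate and the momentum factor of the update rule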
  nn.train(trainingSet, iterations=50, N=0.5, M=0.2)
 
  #testFuncMissingShift(nn, trainingSet)
  """

  for iter in range(1):

    print iter

    # Train predict angles
    print 'Reading database'
    data = readDatabase('DB_TALOS_186_secStruct_nn')
    print 'Read %d entries' % len(data)

    nTest = int(len(data)/2.0)
Example 7
train_ys = np.concatenate(
    (positive_ys[:-2 * pslide], negative_ys[:-2 * nslide]))
test_xs = np.vstack((positive_xs[-2 * pslide:], negative_xs[-2 * nslide:]))
test_ys = np.concatenate(
    (positive_ys[-2 * pslide:], negative_ys[-2 * nslide:]))

epochs = 100

# constant lr
print("### Constant Learning Rate ###")
nn = NN([xs.shape[1], 64, 64, len(set(ys))], ["relu", "relu", "softmax"],
        lr_init=0.01,
        regularization="L2",
        regularization_lambda=0.1)
lr_const_BP_loss = []
for epoch in tqdm(range(epochs)):
    nn.train(train_xs, train_ys)
    lr_const_BP_loss.append(nn.loss)
lr_const_BP_acc = np.mean(np.argmax(nn.forward(test_xs), axis=-1) == test_ys)

# exponential decay lr
print("\n### Exponential Decay Learning Rate ###")
nn = NN([xs.shape[1], 64, 64, len(set(ys))], ["relu", "relu", "softmax"],
        lr_init=0.01,
        lr_decay=0.99,
        lr_min=0.0001,
        regularization="L2",
        regularization_lambda=0.1)
lr_decay_BP_loss = []
for epoch in tqdm(range(epochs)):
    nn.train(train_xs, train_ys)
    lr_decay_BP_loss.append(nn.loss)
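The excerpt stops before the decayed-learning-rate run is evaluated; a plausible continuation, mirroring the constant-learning-rate branch above and assuming matplotlib is available, would be:

lr_decay_BP_acc = np.mean(np.argmax(nn.forward(test_xs), axis=-1) == test_ys)

# compare the two schedules
import matplotlib.pyplot as plt

plt.plot(lr_const_BP_loss, label="constant lr")
plt.plot(lr_decay_BP_loss, label="exponential decay lr")
plt.xlabel("Epoch")
plt.ylabel("Training loss")
plt.legend()
plt.show()
print("Constant lr accuracy : %.2f%%" % (lr_const_BP_acc * 100))
print("Decayed lr accuracy  : %.2f%%" % (lr_decay_BP_acc * 100))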