Beispiel #1
0
 def simpleTrain(self, net=None, nConvergence=0):
     """Run a single plain training pass and return its summary.

     Parameters:
         net: network to train; defaults to this trainer's own ``self.net``.
         nConvergence: convergence window passed to ``Network.train``; 0 means
             "use ``self.nIters``" (the trainer's configured iteration count).

     Returns:
         Whatever ``self.summary(net)`` produces for the trained network.
     """
     # ``is None`` instead of ``== None``: identity check is the correct
     # (and PEP 8 mandated) way to test for the default sentinel.
     if net is None:
         net = self.net
     if nConvergence == 0:
         nConvergence = self.nIters
     Network.train(net, nIters=self.nIters, nConvergence=nConvergence)
     return self.summary(net)
Beispiel #2
0
    def progressiveTrain(self, net=None):
        """Train *net* stage by stage following ``self.trainPlane``.

        Each stage dict supplies ``"freez_layers"`` (layers to freeze for the
        stage — spelling kept from the project's Network API) and
        ``"n_iters"`` (iterations to run).

        Parameters:
            net: network to train; defaults to this trainer's own ``self.net``.

        Returns:
            Whatever ``self.summary(net)`` produces for the trained network.
        """
        # ``is None`` instead of ``== None``: identity check for the sentinel.
        if net is None:
            net = self.net
        for stage in self.trainPlane:
            # Reset, then freeze only the layers this stage asks for.
            net.unfreezAllTrainableVar()
            for layer in stage["freez_layers"]:
                net.freezTrainableVar(layer)
            Network.train(net, nIters=stage["n_iters"])

        return self.summary(net)
Beispiel #3
0
    def __init__(self, n_of_inputs, hidden_layers, n_of_outputs, max_error):
        """Build a symmetric autoencoder: inputs -> hidden -> bottleneck -> mirrored hidden -> inputs.

        Parameters:
            n_of_inputs: size of the input (and reconstructed output) layer.
            hidden_layers: encoder hidden-layer sizes; mirrored for the decoder.
            n_of_outputs: size of the bottleneck (latent) layer.
            max_error: error threshold forwarded to ``Network``.
        """
        # Build the mirrored layer list WITHOUT mutating the caller's list.
        # The original appended/extended ``hidden_layers`` in place, silently
        # corrupting the argument for the caller.
        mirrored = hidden_layers + [n_of_outputs] + hidden_layers[::-1]

        self.autoencoder = Network(n_of_inputs, mirrored, n_of_inputs, max_error)
        self.n_of_inputs = n_of_inputs
        self.n_of_outputs = n_of_outputs
        self.hl = mirrored
        self.neurons_of_layer = [self.n_of_inputs] + self.hl + [self.n_of_inputs]
        print("neurons_of_layer:", self.neurons_of_layer)
Beispiel #4
0
def readMyFile(filename):
    """Read a CSV dataset: every column but the last is a feature, the last is the label.

    Parameters:
        filename: path to the CSV file.

    Returns:
        (data, labels): float32 numpy arrays with one row per CSV record.
    """
    data = []
    labels = []
    with open(filename) as csvDataFile:
        csvReader = csv.reader(csvDataFile)
        for row in csvReader:
            data.append(row[:-1])   # feature columns (all but the last)
            labels.append(row[-1])  # label column (the last)
    print(data)    # debug output, kept from the original
    print(labels)
    # np.float32 instead of the bare name ``float32``, which is a NameError
    # unless numpy was star-imported.
    return np.asarray(data, dtype=np.float32), np.asarray(labels, dtype=np.float32)


# Remember to adjust the paths: under the root you choose, create a "config"
# folder containing the JSON files; it is recommended to keep the layout as provided.
ROOT_DIR = os.path.abspath(os.curdir)
os.chdir(ROOT_DIR)
# os.path.join builds portable paths; the original used Windows-only backslash
# literals where "\c"/"\s" are deprecated escape sequences and "\t" would be a TAB.
jsonStructureDir = os.path.join(ROOT_DIR, "config", "structure.txt")
jsonTrainDir = os.path.join(ROOT_DIR, "config", "train.txt")
jsonProgressiveTrainDir = os.path.join(ROOT_DIR, "config", "progressiveTrain.txt")

# Example 2: Trainer + Network classes (simple train) ("structure.txt" + "train.txt")
trainer = Trainer(jsonTrainDir)
# take a test dataset from the sklearn repository
data, labels = readMyFile(trainer.path)
xTrain, xTest, yTrain, yTest = trainer.split(data, labels)
netTest_2 = Network(jsonStructureDir, xTrain, yTrain)
trainer.simpleTrain(netTest_2)
netTest_2.saveNetwork()
Beispiel #5
0
    print(data)
    print(labels)
    return np.asarray(data, dtype=float32), np.asarray(labels, dtype=float32)


# Remember to adjust the paths: under the root you choose, create a "config"
# folder containing the JSON files; it is recommended to keep the layout as provided.
ROOT_DIR = os.path.abspath(os.curdir)
os.chdir(ROOT_DIR)
# os.path.join builds portable paths; the original used Windows-only backslash
# literals where "\c"/"\s" are deprecated escape sequences and "\t" would be a TAB.
jsonStructureDir = os.path.join(ROOT_DIR, "config", "structure.txt")
jsonTrainDir = os.path.join(ROOT_DIR, "config", "train.txt")
jsonProgressiveTrainDir = os.path.join(ROOT_DIR, "config", "progressiveTrain.txt")

# Example 1: Network stand-alone — trains the whole network following the
# information contained in the "structure.txt" JSON.
datasetPath = 'xor_test_100.csv'
data, labels = readMyFile(datasetPath)
netTest_1 = Network(jsonStructureDir, [], [], data, labels)
Network.train(netTest_1)
predProb = netTest_1.predict()
yHat = np.where(predProb < 0.5, 0, 1)  # threshold class probabilities at 0.5
acc, trueVector, oneHotYTest = netTest_1.acc(yHat)
netTest_1.saveNetwork()
print("Test Accuracy %.2f" % acc)

# Example 2: Trainer + Network classes (simple train) ("structure.txt" + "train.txt")
trainer = Trainer(jsonTrainDir)
# take a test dataset from the sklearn repository
data, labels = readMyFile(trainer.path)
xTrain, xTest, yTrain, yTest = trainer.split(data, labels)
netTest_2 = Network(jsonStructureDir, xTrain, yTrain)
trainer.simpleTrain(netTest_2)
netTest_2.saveNetwork()
    return np.asarray(data, dtype=float32), np.asarray(labels,  dtype=float32)



# Remember to adjust the paths: under the root you choose, create a "config"
# folder containing the JSON files; it is recommended to keep the layout as provided.
ROOT_DIR = os.path.abspath(os.curdir)
os.chdir(ROOT_DIR)
# os.path.join builds portable paths; the original used Windows-only backslash
# literals where "\c"/"\s" are deprecated escape sequences and "\t" would be a TAB.
jsonStructureDir = os.path.join(ROOT_DIR, "config", "structure.txt")
jsonTrainDir = os.path.join(ROOT_DIR, "config", "train.txt")
jsonProgressiveTrainDir = os.path.join(ROOT_DIR, "config", "progressiveTrain.txt")


# Example 1: Network stand-alone — trains the whole network following the
# information contained in the "structure.txt" JSON.
datasetPath = 'xor_test_100.csv'
data, labels = readMyFile(datasetPath)
netTest_1 = Network(jsonStructureDir, [], [], data, labels)
Network.train(netTest_1)
predProb = netTest_1.predict()
yHat = np.where(predProb < 0.5, 0, 1)  # threshold class probabilities at 0.5
acc, trueVector, oneHotYTest = netTest_1.acc(yHat)
netTest_1.saveNetwork()
print("Test Accuracy %.2f" % acc)


# Example 2 (disabled): Trainer + Network (simple train) ("structure.txt" + "train.txt")
# trainer = Trainer(jsonTrainDir)
# # take a test dataset from the sklearn repository
# data, labels = readMyFile(trainer.path)
# xTrain, xTest, yTrain, yTest = trainer.split(data, labels)
# netTest_2 = Network(jsonStructureDir, xTrain, yTrain)
# trainer.simpleTrain(netTest_2)
Beispiel #7
0
def readMyFile(filename):
    """Read a CSV dataset: every column but the last is a feature, the last is the label.

    Parameters:
        filename: path to the CSV file.

    Returns:
        (data, labels): float32 numpy arrays with one row per CSV record.
    """
    data = []
    labels = []
    with open(filename) as csvDataFile:
        csvReader = csv.reader(csvDataFile)
        for row in csvReader:
            data.append(row[:-1])   # feature columns (all but the last)
            labels.append(row[-1])  # label column (the last)
    print(data)    # debug output, kept from the original
    print(labels)
    # np.float32 instead of the bare name ``float32``, which is a NameError
    # unless numpy was star-imported.
    return np.asarray(data, dtype=np.float32), np.asarray(labels, dtype=np.float32)


# Remember to adjust the paths: under the root you choose, create a "config"
# folder containing the JSON files; it is recommended to keep the layout as provided.
ROOT_DIR = os.path.abspath(os.curdir)
os.chdir(ROOT_DIR)
# os.path.join builds portable paths; the original used Windows-only backslash
# literals where "\c"/"\s" are deprecated escape sequences and "\t" would be a TAB.
jsonStructureDir = os.path.join(ROOT_DIR, "config", "structure.txt")
jsonTrainDir = os.path.join(ROOT_DIR, "config", "train.txt")

# Example 2: Trainer + Network classes (simple train) ("structure.txt" + "train.txt")
trainer = Trainer(jsonTrainDir)
data, labels = readMyFile(trainer.path)
xTrain, xTest, yTrain, yTest = trainer.split(data, labels)
# Example 8: using a saved network — put its name in the jsonStructureDir file
# and instantiate with rebuild=True.
netTest_6 = Network(jsonStructureDir, rebuild=True)
predProb = netTest_6.predict(xTest)
yHat = np.where(predProb < 0.5, 0, 1)  # threshold class probabilities at 0.5
acc, trueVector, oneHotYTest = netTest_6.acc(yHat, yTest)
print("Test Accuracy %.2f" % acc)
Beispiel #8
0
# Turn training and testing datasets into lists
# (DataFrame.values exposes the underlying data as numpy arrays).
X_train_numerical = df_train.values
X_train_binary = df_train_missing.values
X_test_numerical = df_test.values
X_test_binary = df_test_missing.values

# Helper functions for pre and post processing the output
min_y, max_y = min(y_train), max(y_train)


# ``def`` instead of lambda-assigned-to-name (PEP 8 E731): same callables,
# same public names, but with docstrings and useful tracebacks.
def min_max_y(a):
    """Scale a raw target into [0, 1] using the training-label range."""
    return (a - min_y) / (max_y - min_y)


def reverse_y(a):
    """Invert min_max_y: map a scaled prediction back to the original range."""
    return a * (max_y - min_y) + min_y


""" NETWORK SETUP """
# Create the network models: 2 sensor, 1 decision
# Note: existing architecture may be loaded using the load method from MLP library
numerical_sensor = Network(architecture=[Layer(11), Layer(11, 'sigmoid'), Layer(1, 'sigmoid')], l_rate=0.001)  # Numerical Sensor
binary_sensor = Network(architecture=[Layer(11), Layer(4, 'sigmoid'), Layer(1, 'sigmoid')], l_rate=0.001)  # Binary Sensor
decision = Network(architecture=[Layer(2), Layer(1, 'linear')], l_rate=0.001)  # Decision MLP

# Initialize the networks (this generates the initial weights)
numerical_sensor.initialize()
binary_sensor.initialize()
decision.initialize()

# Uncomment below for a small batch test
# X_train_numerical = X_train_numerical[:600]
# X_train_binary = X_train_binary[:600]
# y_train = y_train[:600]
# X_test_numerical = X_test_numerical[:300]
# X_test_binary = X_test_binary[:300]
# y_test = y_test[:300]
Beispiel #9
0
 def createNet(self, X, Y, jsonStructureDir):
     """Split (X, Y) into train/test partitions, build a Network from the JSON
     structure file on the training split, store it as ``self.net`` and return it."""
     self.split(X, Y)
     built = Network(jsonStructureDir, x=self.xTrain, y=self.yTrain)
     self.net = built
     return built
Beispiel #10
0
# TRAIN-TEST SPLIT
# 80/20 hold-out split; random_state=1 fixes the shuffle for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.20,
                                                    random_state=1)

# MIN-MAX Output SCALING
min_y, max_y = min(y_train), max(
    y_train)  # keeping track of min and max to unscale the predicted output
# Scale training targets into [0, 1] (matches the sigmoid output layer below).
y_train = [(y - min_y) / (max_y - min_y) for y in y_train]
print('> Data Preprocessed and Scaled')
'''NEURAL NETWORK'''
# Architecture, activation functions, and learning rate setup
# 11 inputs -> 11 tanh hidden units -> 1 sigmoid output; Network/Layer are
# project classes, constructor semantics assumed from usage here.
net = Network(architecture=[Layer(11),
                            Layer(11, 'tanh'),
                            Layer(1, 'sigmoid')],
              l_rate=0.1)
print('> Network setup.')
net.initialize()  # network initialization
print('> Network initialized.')

# Training
# NOTE(review): decay/decay_rate presumably schedule learning-rate decay —
# confirm against the project's Network.train signature.
trial_error = net.train(X_train,
                        y_train,
                        epochs=50,
                        verbose=True,
                        plot=True,
                        decay=0.1,
                        decay_rate=10)
print('> Training complete.')
Beispiel #11
0
class Autoencoder:
    """Symmetric autoencoder wrapped around the project's ``Network`` class.

    The architecture is mirrored around the bottleneck: ``hidden_layers`` of
    ``[h1, h2]`` with bottleneck ``n_of_outputs = b`` yields
    inputs -> h1 -> h2 -> b -> h2 -> h1 -> inputs.
    """

    def __init__(self, n_of_inputs, hidden_layers, n_of_outputs, max_error):
        """Build the mirrored network.

        Parameters:
            n_of_inputs: size of the input (and reconstructed output) layer.
            hidden_layers: encoder hidden-layer sizes; mirrored for the decoder.
            n_of_outputs: size of the bottleneck (latent) layer.
            max_error: error threshold forwarded to ``Network``.
        """
        # Build the mirrored layer list WITHOUT mutating the caller's list.
        # The original appended/extended ``hidden_layers`` in place, silently
        # corrupting the argument for the caller.
        mirrored = hidden_layers + [n_of_outputs] + hidden_layers[::-1]

        self.autoencoder = Network(n_of_inputs, mirrored, n_of_inputs, max_error)
        self.n_of_inputs = n_of_inputs
        self.n_of_outputs = n_of_outputs
        self.hl = mirrored
        self.neurons_of_layer = [self.n_of_inputs] + self.hl + [self.n_of_inputs]
        print("neurons_of_layer:", self.neurons_of_layer)

    # epochs: number of epochs to train
    # eta: learning rate
    # K: if the error decreases K times, then eta = eta + a*eta
    # Q: if the error increases Q times, then eta = eta + b*eta
    # adaptive_lr: enables/disables exponential decay of eta
    def train(self, inputs, outputs, epochs, eta, K, a, Q, b, adaptive_lr=False):
        """Delegate training to the wrapped Network (parameters documented above)."""
        self.autoencoder.train(inputs, outputs, epochs, eta, K, a, Q, b, adaptive_lr)

    def _feed_forward(self, weights, biases, neurons_of_layer, input_):
        """Run ``input_`` through the given weight/bias slices and return the
        final layer's activation (shared by encode/decode, which previously
        duplicated this loop)."""
        activations = [np.zeros(n) for n in neurons_of_layer]
        activations[0] = input_
        for i, w in enumerate(weights):
            # NOTE(review): assumes each weight matrix is (in, out) and each
            # bias is a row vector, matching the original reshape — confirm
            # against Network's internal layout.
            x = np.dot(w.T, input_) + biases[i].T
            x = x.reshape(x.shape[1])
            input_ = sigmoid(x)
            activations[i + 1] = input_
        return activations[-1]

    def decode(self, input_):
        """Map a latent vector back to input space using the second half of the net."""
        weights = self.autoencoder.weights
        biases = self.autoencoder.biases
        return self._feed_forward(
            weights[int(len(weights) / 2):],
            biases[int(len(biases) / 2):],
            self.neurons_of_layer[int(len(self.neurons_of_layer) / 2):],
            input_,
        )

    def encode(self, input_):
        """Map an input vector to its latent representation using the first half of the net."""
        weights = self.autoencoder.weights
        biases = self.autoencoder.biases
        return self._feed_forward(
            weights[:int(len(weights) / 2)],
            biases[:int(len(biases) / 2)],
            self.neurons_of_layer[:int(len(self.neurons_of_layer) / 2) + 1],
            input_,
        )