def get_trained_model(self):
    if self.model_name == 'LDA':
        self.model = LDA(self.x_train, self.y_train, self.x_test, self.y_test)
        self.model.train_model()
    elif self.model_name == 'LR':
        self.model = LogisticReg(self.x_train, self.y_train, self.x_test, self.y_test)
        self.model.train_model()
    elif self.model_name == 'MLP':
        self.model = MultiLayerPerceptron(self.x_train, self.y_train, self.x_test, self.y_test)
        self.model.train_model()
    elif self.model_name == 'SVM':
        self.model = SupportVectorMachine(self.x_train, self.y_train, self.x_test, self.y_test)
        self.model.train_model()
    return self.model.get_model()

def get_trained_model(self):
    if self.model_name == 'MLP':
        self.model = MultiLayerPerceptron(self.x_train, self.y_train, self.x_test, self.y_test)
        self.model.train_model()
    elif self.model_name == 'LR':
        self.model = LogisticReg(self.x_train, self.y_train, self.x_test, self.y_test)
        self.model.train_model()
    elif self.model_name == 'DT':
        self.model = DecisionTree(self.x_train, self.y_train, self.x_test, self.y_test)
        self.model.train_model()
    elif self.model_name == 'XGB':
        self.model = XGBoost(self.x_train, self.y_train, self.x_test, self.y_test)
        self.model.train_model()
    return self.model.get_model()

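# A minimal sketch of an alternative dispatch style for the two get_trained_model
# variants above: a name -> class mapping removes the repeated branches. This is
# illustrative only and assumes each model class keeps the same
# (x_train, y_train, x_test, y_test) constructor and train_model()/get_model() API.
MODEL_REGISTRY = {
    'MLP': MultiLayerPerceptron,
    'LR': LogisticReg,
    'DT': DecisionTree,
    'XGB': XGBoost,
}

def get_trained_model(self):
    model_cls = MODEL_REGISTRY[self.model_name]  # raises KeyError for unknown names
    self.model = model_cls(self.x_train, self.y_train, self.x_test, self.y_test)
    self.model.train_model()
    return self.model.get_model()
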
from MultiLayerPerceptron import MultiLayerPerceptron
import numpy as np
from matplotlib import pyplot as plt

output_file = "/home/hugh/connect_comp/ProgrammingAssignment/Task1.txt"
mlp = MultiLayerPerceptron((2, 2, 1), hidden_activation="sigmoid", max_iters=10000,
                           linear_factor=0.2, learning_rate=0.7, verbose=(True, 1000),
                           output_activation="linear", output_file=output_file)

# XOR truth table
x = np.array([[1, 0], [0, 1], [0, 0], [1, 1]])
y = np.array([1, 1, 0, 0])

errors = mlp.fit(x, y)

plt.figure()
plt.plot(errors, color="blue", label="Sigmoid + Linear")
plt.title("Mean Squared Error Over Time")
plt.xlabel("$Epochs$")
plt.ylabel("$Error$")
plt.show()
# plt.savefig("/home/hugh/connect_comp/ProgrammingAssignment/Task1.png")

print("1, 1")
print(mlp.predict(np.array([1, 1])))
print("0, 0")
print(mlp.predict(np.array([0, 0])))
print("0, 1")
print(mlp.predict(np.array([0, 1])))
print("1, 0")
print(mlp.predict(np.array([1, 0])))
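# Follow-up sketch: threshold the raw outputs at 0.5 to read off the learned XOR
# table in one pass. Assumes mlp.predict on a single sample returns a scalar-like
# value, as the prints above suggest.
for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    raw = float(np.asarray(mlp.predict(np.array([a, b]))).ravel()[0])
    print("%d XOR %d -> %d (raw %.3f)" % (a, b, int(raw >= 0.5), raw))
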
# split into train/test sets
index = np.random.rand(len(y)) < 0.9
x_train = np.array(x[index])
y_train = np.array(y[index])
x_test = np.array(x[~index])
y_test = np.array(y[~index])

mlp = MultiLayerPerceptron((len(x.columns), 20, len(y.unique())),
                           max_iters=200, hidden_activation="relu",
                           output_activation="softmax", linear_factor=1,
                           learning_rate=0.01, verbose=(True, 100),
                           weight_update=1, loss="crossentropy")
errors = mlp.fit(x_train, y_train)

plt.plot(errors, color="blue", label="Relu + Softmax")
plt.title("Cross Entropy Error Over Time")
plt.xlabel("$Epochs$")
plt.ylabel("$Error$")

mlp = MultiLayerPerceptron((
from TrainingPattern import TrainingPattern
from MultiLayerPerceptron import MultiLayerPerceptron

if __name__ == "__main__":
    # 4-bit one-hot identity mapping forced through a 2-unit bottleneck
    trainingPatterns = [
        TrainingPattern([1, 0, 0, 0], [1, 0, 0, 0]),
        TrainingPattern([0, 1, 0, 0], [0, 1, 0, 0]),
        TrainingPattern([0, 0, 1, 0], [0, 0, 1, 0]),
        TrainingPattern([0, 0, 0, 1], [0, 0, 0, 1]),
    ]
    mlp = MultiLayerPerceptron(inputLayerSize=4, hiddenLayersSize=[2],
                               outputLayerSize=4, epochs=10000,
                               learningStep=0.5, biasNeuron=True)
    mlp.train(trainingPatterns)
    for tp in trainingPatterns:
        out = mlp.calculateNetworkOutput(tp)
        print("Expected output : {} mlp returned : {}".format(tp.outputs, out))

import random

import numpy as np

from MultiLayerPerceptron import MultiLayerPerceptron


class QNeuralNetwork(object):
    """
    action: one entry per action pattern
    Learning algorithm: Q-learning
        a = getNextAction(s)
        learn(s, a, r, s_next)
    NOTE: kept generic so it does not depend on the rules of any particular game.
    """
    ALPHA = 0.5
    GAMMA = 0.9
    DATASET_NUMBER = 100
    LEARN_EPOCHS = 500
    LEARN_RATE = 0.2
    GREEDY_RATIO = 0.5

    def __init__(self, _numInput=3, numAction=3):
        self.action_pattern = range(numAction)
        self.learningObj = MultiLayerPerceptron(numInput=_numInput, numHidden=5,
                                                numOutput=numAction,
                                                activate1="tanh", activate2="sigmoid")
        self.X = []
        self.Y = []
        self.learnFlg = True

    def learn(self, o, a, r, o_next):
        """Learn Q-values with the neural network as function approximator."""
        dQs = self.learningObj.predict(o)
        qk = dQs[a]
        # bootstrap from the best Q-value of the *next* observation
        maxQ = np.max(self.learningObj.predict(o_next))
        dQs[a] = qk + self.ALPHA * (r + self.GAMMA * maxQ - qk)
        self.X.append(np.asarray(o))
        self.Y.append(np.asarray(dQs))
        # keep a sliding window of the most recent DATASET_NUMBER samples
        if len(self.X) > self.DATASET_NUMBER:
            self.X.pop(0)
            self.Y.pop(0)
        err = self.learningObj.fit(np.asarray(self.X), np.asarray(self.Y),
                                   learning_rate=self.LEARN_RATE,
                                   epochs=self.LEARN_EPOCHS)
        return err

    def getNextAction(self, o):
        # get the Q-value array for the observation from the NN
        Q_t = self.learningObj.predict(o)
        # choose randomly among the actions tied for the maximum Q-value
        best_actions = np.flatnonzero(Q_t == np.max(Q_t))
        a = np.random.choice(best_actions)
        # if self.learnFlg:
        #     return a
        # epsilon-greedy: act randomly with probability GREEDY_RATIO
        if self.GREEDY_RATIO < random.random():
            return a
        else:
            return np.random.choice(self.action_pattern)

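# A minimal usage sketch of the getNextAction/learn protocol documented above.
# `env` is a hypothetical environment with reset() and step(a) returning an
# observation vector, a reward, and a done flag; it is not part of this codebase.
agent = QNeuralNetwork(_numInput=3, numAction=3)
for episode in range(100):
    o = env.reset()
    done = False
    while not done:
        a = agent.getNextAction(o)
        o_next, r, done = env.step(a)
        agent.learn(o, a, r, o_next)  # Q-update plus a refit on the sliding window
        o = o_next
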
# -*- coding: utf-8 -*-
"""
MLP_test.py
Multi-layer perceptron (XOR demo)
"""
import numpy as np
import sys

from MultiLayerPerceptron import MultiLayerPerceptron
from ndprint import ndprint, ndprints

if __name__ == '__main__':
    # build the dataset (XOR)
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([0, 1, 1, 0])

    # initialise the multi-layer perceptron
    mlp = MultiLayerPerceptron(numInput=2, numHidden=5, numOutput=1,
                               activate1="tanh", activate2="identity")

    # train the perceptron
    mlp.fit(X, y, learning_rate=0.2, epochs=10000)

    # run the perceptron
    y0 = mlp.predict(X[0])
    for x_i, y_i in zip(X, y):
        print('X:%s, y:%0.2f, pred:%0.2f' % (ndprint(x_i), y_i, mlp.predict(x_i)))

from sklearn.utils import shuffle

from MultiLayerPerceptron import MultiLayerPerceptron


def MLP(data, batch_size):
    mlp = MultiLayerPerceptron(_layers=1, _bias=1, _inputs=4, _outputs=2,
                               _learningRate=0.01, _maxIter=200)
    for i in range(mlp.maxIter):
        mlp.totalError = 0
        data = shuffle(data)
        for j in range(len(data)):
            # feature columns come first, target columns last
            row = list(data.iloc[j, :-mlp.outputs])
            target = list(data.iloc[j, mlp.inputs:])
            mlp.estimate(row)
            mlp.updateDeltaWeight(target)
            # apply accumulated deltas once per mini-batch
            if j % batch_size == 0:
                mlp.updateWeight()
                mlp.updateTotalError(target)
                # print("at iteration ", i, " error = " + str(mlp.totalError))
        # flush any deltas left over from an incomplete final batch
        if j % batch_size != 0:
            mlp.updateWeight()
            mlp.updateTotalError(target)
            # print("at iteration ", i, " error = " + str(mlp.totalError))
        if mlp.totalError < 0.01:
            break
    return mlp

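# Usage sketch for MLP(data, batch_size): the function expects a DataFrame whose
# first `inputs` columns are features and whose last `outputs` columns are targets.
# Building such a frame from sklearn's iris data (two one-hot target columns to
# match _outputs=2) is one way to exercise it; this layout is an assumption based
# on the iloc slicing above, not something the original code guarantees.
import pandas as pd
from sklearn.datasets import load_iris

iris = load_iris()
mask = iris.target != 2                      # keep two classes for two outputs
frame = pd.DataFrame(iris.data[mask], columns=["sl", "sw", "pl", "pw"])
labels = iris.target[mask]
frame["class0"] = (labels == 0).astype(int)  # one-hot target columns
frame["class1"] = (labels == 1).astype(int)
trained = MLP(frame, batch_size=10)
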
output_vector.append(combine(unit))

input_vector = np.array(input_vector)
output_vector = np.array(output_vector)

# 80/20 train/test split
index = np.random.rand(len(input_vector)) < 0.8
x_train = input_vector[index]
y_train = output_vector[index]
x_test = input_vector[~index]
y_test = output_vector[~index]

mlp = MultiLayerPerceptron((4, 5, 1), hidden_activation="sigmoid", max_iters=1000,
                           linear_factor=1, learning_rate=0.3,
                           verbose=(True, 100), weight_update=10)
errors = mlp.fit(x_train, y_train)

plt.plot(errors, color="blue", label="Sigmoid + Linear")
plt.title("Mean Squared Error Over Time")
plt.xlabel("$Epochs$")
plt.ylabel("$Error$")
plt.legend()
# plt.show()
plt.savefig("/home/hugh/connect_comp/ProgrammingAssignment/Task2.png")