Example #1
def test_mlp():
    X_train, y_train = load_mnist('./data', 'train')
    # Validate row and column count on train data
    print('Rows: {}, columns: {}'.format(X_train.shape[0], X_train.shape[1]))
    if int(X_train.shape[0]) != 60000:
        print('Rows invalid -- FAIL')
    if int(X_train.shape[1]) != 784:
        print('Columns invalid -- FAIL')

    # Validate row and column count on test data
    X_test, y_test = load_mnist('./data', 't10k')
    print('Rows: {}, columns: {}'.format(X_test.shape[0], X_test.shape[1]))
    if int(X_test.shape[0]) != 10000:
        print('Rows invalid -- FAIL')
    if int(X_test.shape[1]) != 784:
        print('Columns invalid -- FAIL')

    # Validate the image integrity is retained
    fig, ax = plt.subplots(nrows=2, ncols=5,
                           sharex=True, sharey=True)

    ax = ax.flatten()
    for i in range(10):
        img = X_train[y_train == i][0].reshape(28, 28)
        ax[i].imshow(img, cmap='Greys')

    # Configure the shared axes and render once, outside the loop
    ax[0].set_xticks([])
    ax[0].set_yticks([])
    plt.tight_layout()
    plt.show()

    # Compress the data for portability
    np.savez_compressed('mnist_scaled.npz',
                        X_train=X_train,
                        y_train=y_train,
                        X_test=X_test,
                        y_test=y_test)

    mlp = MultiLayerPerceptron(n_hidden=100,
                               l2=0.01,
                               epochs=200,
                               eta=0.0005,
                               minibatch_size=100,
                               shuffle=True,
                               seed=1)
    # Train the network
    mlp.fit(X_train=X_train[:55000],
            y_train=y_train[:55000],
            X_valid=X_train[55000:],
            y_valid=y_train[55000:])

    # Visualize the output
    plt.plot(range(mlp.epochs), mlp.eval_['cost'])
    plt.ylabel('Cost')
    plt.xlabel('Epochs')
    plt.show()
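Since test_mlp writes mnist_scaled.npz via np.savez_compressed, a minimal reload sketch (assuming the archive sits in the working directory) looks like this:

import numpy as np

# Reload the arrays written by np.savez_compressed in test_mlp
mnist = np.load('mnist_scaled.npz')
X_train, y_train = mnist['X_train'], mnist['y_train']
X_test, y_test = mnist['X_test'], mnist['y_test']
print(X_train.shape)  # expected: (60000, 784)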
class PerceptronTests(unittest.TestCase):
    def setUp(self):
        unittest.TestCase.setUp(self)
        self.params = {'neuronsByLayer': [2],
                       'learningRate': 0.2,
                       'numSamples': 4,
                       'trainingPercentage': 1.0,
                       'beta': 3.0}
        self.inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
        self.biasedInputs = [[0, 0, -1], [0, 1, -1], [1, 0, -1], [1, 1, -1]]
        self.targets = [[0], [1], [1], [0]]
        self.weights = [[[1, 1], [1, 1], [0.5, 1]], [[1], [-1], [0.5]]]
        (copyParams, copyInputs) = copy.deepcopy((self.params, self.inputs))
        self.percepter = MultiLayerPerceptron(copyParams, copyInputs)

    def testConstructor(self):
        (copyParams, copyInputs) = copy.deepcopy((self.params, self.inputs))
        testPercepter = MultiLayerPerceptron(copyParams, copyInputs)
        expectedParams = list(map(lambda x:x+1, self.params['neuronsByLayer']))
        self.assertListEqual(expectedParams, testPercepter.params['neuronsByLayer'])
        self.assertListEqual(self.biasedInputs, testPercepter.inputs)

    def testInitializeWeights(self):
        weights = self.percepter.initializeWeights(len(self.biasedInputs[0]), self.percepter.params['neuronsByLayer'], 1)
        #count number of hidden layers + output layer
        self.assertEqual(len(weights), len(self.weights))
        for i in range(len(self.weights)):
            self.assertEqual(len(weights[i]), len(self.weights[i]))
            for j in range(len(self.weights[i])):
                self.assertEqual(len(weights[i][j]), len(self.weights[i][j]))
class FaceDetectionHandler(tornado.websocket.WebSocketHandler):
    
    supports_binary = True
    
    def open(self, *args, **kwargs):
        image_list, teacher_data = get_data()
        self.mlp = MultiLayerPerceptron(len(image_list[0]), len(image_list[0]), len(teacher_data[0]), "tanh", "sigmoid")
    
    
    def on_message(self,message):
        message = json.loads(message)
        #print type(message)
        #print len(message)
        #print np.array(message,dtype="uint8")
        message = np.resize(np.array(message,dtype="uint8"),(100,100))
        #gray, detected_faces = self.detect_face(message)
        #face_index = 0
        #plt.imshow(gray,cmap='Greys_r')
        #plt.show()
        #print gray
        #print detected_faces,"d"
        # predict output
        #for face in detected_faces:
        #    (x, y, w, h) = face
            #print "found!!"
            #print x,y,w,h
            #extracted_face = self.extract_face_features(gray, face, (0.03, 0.05)) #(0.075, 0.05)
        output = self.predict(message)
        print(output)
            #self.write_message(json.dumps({"x":int(x),"y":int(y),"w":int(w),"h":int(h)}))
            
            
        #plt.imshow(message,cmap="gray")
        #plt.show()
        
    def detect_face(self,gray):
        cascPath = "data/haarcascade_frontalface_default.xml"
        faceCascade = cv2.CascadeClassifier(cascPath)
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        detected_faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=6,
            minSize=(10, 10),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        return gray, detected_faces

    '''    
    def extract_face_features(self,gray, detected_face, offset_coefficients):
        (x, y, w, h) = detected_face
        horizontal_offset = offset_coefficients[0] * w
        vertical_offset = offset_coefficients[1] * h
        extracted_face = gray[y+vertical_offset:y+h,
                              x+horizontal_offset:x-horizontal_offset+w]
        new_extracted_face = zoom(extracted_face, (64. / extracted_face.shape[0],
                                           64. / extracted_face.shape[1]))
        new_extracted_face = new_extracted_face.astype(float32)
        new_extracted_face /= float(new_extracted_face.max())
        
        return new_extracted_face
    '''
    def predict(self, face):
        # Return the prediction so on_message can print it
        return self.mlp.predict_from_stored_weight(face)
Example #8
        # Create the output files
        # precision
        preMat = open("./precision/preMat" + str(loop) + ".csv", "w")
        preMat = csv.writer(preMat)
        # recall
        recMat = open("./recall/recMat" + str(loop) + ".csv", "w")
        recMat = csv.writer(recMat)
        # fscore
        fscMat = open("./F-score/fscMat" + str(loop) + ".csv", "w")
        fscMat = csv.writer(fscMat)

        # Build the multi-layer perceptron
        mlp = MultiLayerPerceptron(28 * 28,
                                   1000,
                                   10,
                                   act1="tanh",
                                   act2="softmax",
                                   preMat=preMat,
                                   recMat=recMat,
                                   fscMat=fscMat)

        #####################
        # All digits 0-9
        #####################

        # Split into training and test data
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=0.1)

        # Convert the target digits to 1-of-K (one-hot) encoding (all data)
        labels_train = LabelBinarizer().fit_transform(y_train)
Example #9
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder

from mlp import MultiLayerPerceptron
import numpy as np

iris = datasets.load_iris()

x = iris.data
y = iris.target[:, np.newaxis]

# Shuffle x and y with the same permutation so feature/label pairs stay aligned
perm = np.random.permutation(len(x))
x, y = x[perm], y[perm]

x -= np.mean(x)
x /= np.max(x)

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.20, random_state=0)

onehot_encoder = OneHotEncoder(sparse=False, categories='auto')
y_train = onehot_encoder.fit_transform(y_train)

mlp = MultiLayerPerceptron(x_train.shape[1], [10, 3])
mlp.fit(x_train.T, y_train.T, 0.01, 10000)

print(mlp.accuracy(x_test.T, y_test.T))
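Note that fit above receives one-hot targets while accuracy receives integer labels. If accuracy expects the same encoding as fit (its signature is not shown here, so this is an assumption), the test labels would need the fitted encoder applied first:

y_test_onehot = onehot_encoder.transform(y_test)  # assumption: accuracy() expects one-hot targets
print(mlp.accuracy(x_test.T, y_test_onehot.T))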
class BehaviorCloningModel(nn.Module):
    """
    Model predicts action given state of the environment
    """
    def __init__(self,
                 num_envstate_dims,
                 num_action_dims,
                 hidden_layer_sizes,
                 criterion=nn.MSELoss(),
                 lr=4e-4,
                 activation=f.selu,
                 seed=0):
        torch.manual_seed(seed)
        super(BehaviorCloningModel, self).__init__()
        self.mlp = MultiLayerPerceptron(in_features=num_envstate_dims,
                                        hidden_layer_sizes=hidden_layer_sizes +
                                        [num_action_dims],
                                        activation=activation)
        self.criterion = criterion
        self.optimizer = optim.Adam(self.mlp.parameters(), lr=lr)

    def forward(self, state):
        return self.mlp(state)

    def train_and_validate(self, train_dl, valid_dl, num_epochs):
        loss_list = []
        avg_loss_list = []
        valid_loss_list = []
        logger.info("Starting with epoch 0")
        for epoch in range(num_epochs):
            losses_for_given_epoch = []
            self.mlp.train()
            for states, actions in train_dl:
                states = states.float()
                actions = actions.float()

                self.optimizer.zero_grad()
                # Generate predictions
                pred_actions = self.mlp(states)
                loss = self.criterion(pred_actions, actions)

                loss.backward()
                self.optimizer.step()
                losses_for_given_epoch.append(loss.item())

            self.mlp.eval()
            with torch.no_grad():
                valid_loss_sum = 0
                for states, actions in valid_dl:
                    states = states.float()
                    actions = actions.float()

                    pred_actions = self.mlp(states)
                    valid_loss_sum += self.criterion(pred_actions, actions).item()

                valid_loss = valid_loss_sum / len(valid_dl)

            loss_list += losses_for_given_epoch
            avg_loss_list.append(np.mean(losses_for_given_epoch))
            valid_loss_list.append(valid_loss)
            logger.info(f'Completed epoch: {epoch}/{num_epochs}')
            logger.info(
                f'Avg loss this epoch: {np.mean(losses_for_given_epoch)}')
            logger.info(f'Validation loss this epoch: {valid_loss}')

        return loss_list, avg_loss_list, valid_loss_list
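A minimal usage sketch for train_and_validate (not part of the original module); the arrays, dimensions, and split below are illustrative assumptions:

import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader

# Hypothetical logged (state, action) pairs from an expert policy
states = np.random.randn(1000, 8).astype(np.float32)
actions = np.random.randn(1000, 2).astype(np.float32)

dataset = TensorDataset(torch.from_numpy(states), torch.from_numpy(actions))
train_ds, valid_ds = torch.utils.data.random_split(dataset, [800, 200])
train_dl = DataLoader(train_ds, batch_size=64, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=64)

model = BehaviorCloningModel(num_envstate_dims=8, num_action_dims=2,
                             hidden_layer_sizes=[64, 64])
losses, avg_losses, valid_losses = model.train_and_validate(train_dl, valid_dl, num_epochs=10)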
Example #11
# shuffle rows
df = df.reindex(np.random.permutation(df.index))

from mlp import MultiLayerPerceptron

X = df.iloc[0:150, [0, 1, 2, 3]].values
y = df.iloc[0:150, 4].values

training_X = df.iloc[0:100, [0, 1, 2, 3]].values
testing_X = df.iloc[100:150, [0, 1, 2, 3]].values

training_y = df.iloc[0:100, 4].values
training_y = np.where(training_y == 'Iris-setosa', -1, 1)

testing_y = df.iloc[100:150, 4].values
testing_y = np.where(testing_y == 'Iris-setosa', -1, 1)

perceptron = MultiLayerPerceptron(iteration_count=100)
perceptron.fit(training_X, training_y)

print(perceptron.w_)  # final parameters
print()

print("Predicted")
print(perceptron.predict(testing_X))
print()

print("Actual")
print(testing_y)
Example #12
if testing == "sin":
    factory.generate_sin_values(inputs[testing], target[testing])

output = np.zeros(target[testing].shape)

options = {
    "xor": {
        "number_inputs": number_inputs,
        "number_hidden_units": number_hidden_units,
        "number_outputs": number_outputs,
        "activation_type": activation_type
    },
    "sin": factory.sin_finder(activation_type)
}

nn = MultiLayerPerceptron(options[testing])

for epoch in range(max_epochs):
    error = 0
    for index, value in enumerate(inputs[testing]):
        output[index] = nn.forward(value)
        error += nn.backward(target[testing][index])
        nn.update_weights(learning_rate[activation_type])
    if epoch % 100 == 0:
        print('Epoch:\t{}\tError:\t{}'.format(epoch, error))
        learning_rate[activation_type] *= learning_rate_change[activation_type]

if testing == "xor":
    print('Output: {0}'.format(output))
print('Average difference between target and output: {0}'.format(
    nn.average_miss(target[testing], output)))
Example #13
from mlp import MultiLayerPerceptron


TEST_PATH = "data/test.csv"
TRAIN_PATH = "data/train.csv"
test_set = TestSet(TEST_PATH).read()
train_set = TrainSet(TRAIN_PATH).read()
x_train = train_set.drop("label", axis=1).values.astype('float32')
y_train = train_set["label"].values.astype('int32')

#preprocessing
max_value = np.max(x_train)
mean_value = np.mean(x_train)
test_set = (test_set - mean_value) / max_value
x_train = (x_train - mean_value) / max_value


#MLP
MLP_SUBMISSION = "mlp.csv"
mlp = MultiLayerPerceptron(x_train, y_train)
mlp.fit()
predictions = mlp.predict(test_set)
Submission(predictions).save(MLP_SUBMISSION)


for i in range(1, 10):
    random_image = test_set[i,:]
    print(predictions[i])
    Pixel(random_image).display()

        # Create the output files
        # precision_recall_fscore_support
        preMat = open("./precision/preMat" + str(loop) + ".csv", "w")
        preMat = csv.writer(preMat)
        # recall
        recMat = open("./recall/recMat" + str(loop) + ".csv", "w")
        recMat = csv.writer(recMat)
        # fscore
        fscMat = open("./F-score/fscMat" + str(loop) + ".csv", "w")
        fscMat = csv.writer(fscMat)

        # Build the multi-layer perceptron
        mlp = MultiLayerPerceptron(28 * 28,
                                   200,
                                   10,
                                   act1="sigmoid",
                                   act2="softmax",
                                   preMat=preMat,
                                   recMat=recMat,
                                   fscMat=fscMat)

        #-------------------------------
        # Prepare the training and test data
        #-------------------------------
        # Prepare test data containing all digits
        # Split into training and test data (only the test data is used)
        X_train_all, X_test_all, y_train_all, y_test_all = train_test_split(
            X, y, test_size=0.1)

        # Get the indices excluding digit 4
        index_without4 = np.where((y != 4))
        # Get the target signals at those indices (digit 4 excluded)
Example #15
        "optimizer": torch.optim.SGD,
        "loss_fnc": torch.nn.MSELoss(),
        "plot": True
    }
    opts.update(args_dict)

    # label information
    label_name = "income"
    label_mapping = {'<=50K': 0, '>50K': 1}

    # random seed
    if opts.seed is not None:
        torch.manual_seed(opts.seed)

    # load data
    #   You only have to call this once, then it'll save the preprocessed data into a new .csv file
    train_loader, valid_loader = load_data( "../data/adult.csv", label_name, label_mapping,
                                            preprocess=True, batch_size=opts.batch_size, seed=opts.seed)

    #   Then you can just call it like this, which should make thing run faster
    #train_loader, valid_loader = load_data( "../data/adult_preprocessed.csv", label_name, label_mapping,
    #                                        preprocess=False, batch_size=opts.batch_size, seed=opts.seed)

    # creating model
    input_size = len(train_loader.dataset.data[0, :])
    output_size = 1
    model = MultiLayerPerceptron(input_size, output_size, """other parameters""")

    # training model
    final_statistics = train(model, train_loader, valid_loader, opts)
Example #16
df = df.reindex(np.random.permutation(df.index))

from mlp import MultiLayerPerceptron


X = df.iloc[0:150, [0, 1, 2, 3]].values
y = df.iloc[0:150, 4].values


training_X = df.iloc[0:100, [0, 1, 2, 3]].values
testing_X = df.iloc[100:150, [0, 1, 2, 3]].values

training_y = df.iloc[0:100, 4].values
training_y = np.where(training_y == 'Iris-setosa', -1, 1)

testing_y = df.iloc[100:150, 4].values
testing_y = np.where(testing_y == 'Iris-setosa', -1, 1)

perceptron = MultiLayerPerceptron(iteration_count=100)
perceptron.fit(training_X, training_y)

print(perceptron.w_)  # final parameters
print()

print("Predicted")
print(perceptron.predict(testing_X))
print()

print("Actual")
print(testing_y)
Example #17
http://scikit-learn.org/
"""

if __name__ == "__main__":
    # Load scikit-learn's small handwritten digits dataset
    # 1797 samples, 8x8 pixels
    digits = load_digits()

    # Build the training data
    X = digits.data
    y = digits.target
    # Normalize the pixel values to 0.0-1.0
    X /= X.max()

    # Multi-layer perceptron
    mlp = MultiLayerPerceptron(64, 100, 10, act1="tanh", act2="sigmoid")

    # Split into training data (90%) and test data (10%)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)

    # Convert the target digits to 1-of-K (one-hot) encoding
    # 0 => [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    # 1 => [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
    # ...
    # 9 => [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
    labels_train = LabelBinarizer().fit_transform(y_train)
    labels_test = LabelBinarizer().fit_transform(y_test)

    # Learn the network weights from the training data
    mlp.fit(X_train, labels_train, learning_rate=0.01, epochs=10000)
"""


if __name__ == "__main__":
    # Load scikit-learn's small handwritten digits dataset
    # 1797 samples, 8x8 pixels
    digits = load_digits()

    # Build the training data
    X = digits.data
    y = digits.target
    # Normalize the pixel values to 0.0-1.0
    X /= X.max()

    # Multi-layer perceptron
    mlp = MultiLayerPerceptron(64, 100, 10, act1="sigmoid", act2="sigmoid")

    # Split into training data (90%) and test data (10%)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)

    # Convert the target digits to 1-of-K (one-hot) encoding
    # 0 => [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    # 1 => [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
    # ...
    # 9 => [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
    labels_train = LabelBinarizer().fit_transform(y_train)
    labels_test = LabelBinarizer().fit_transform(y_test)

    # Learn the network weights from the training data
    mlp.fit(X_train, labels_train, epochs=10000)
Example #19
from mlp import MultiLayerPerceptron

mlp = MultiLayerPerceptron()

mlp.printModel()
Example #20
# Assumed imports for this fragment; to_categorical, Input, Dense, and
# MultiLayerPerceptron presumably come from the project's own modules.
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
def main():
    data = pd.read_csv('blood.csv')
    x_train = data.drop(['a'], axis=1)
    y_train = data['a']

    x_train = pd.get_dummies(x_train)
    target_map = dict()
    if y_train.dtype == 'object':
        target_map = {val: i for (i, val) in enumerate(np.unique(y_train))}
        # print(target_map)
        y_train = y_train.map(target_map)
        y_train = to_categorical(y_train)
        # print(y_train[:5])
    x_train = x_train.values
    # x_train = (x_train - x_train.min(axis=0)) / (x_train.max(axis=0) - x_train.min(axis=0))
    # x_train = (x_train - x_train.mean(axis=0)) / np.std(x_train, axis=0)

    x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.2)

    mean_train = x_train.mean(axis=0)
    std_train = np.std(x_train, axis=0) 

    x_train = (x_train - mean_train)/std_train
    x_test = (x_test - mean_train)/std_train



    mlp = MultiLayerPerceptron()
    mlp.add(Input(x_train.shape[1]))
    mlp.add(Dense(32, activation='relu'))
    mlp.add(Dense(2, activation='sigmoid'))
    mlp.build()
    mlp.fit(x_train, y_train, epoch=40, lr=0.05, validation_data=(x_test, y_test))
    mlp.draw()
def runMyMLP(params, inputs, objective):
    percepter = MultiLayerPerceptron(params, inputs)
    percepter.train(objective.outputDim)
Example #22
def main():
    data = pd.read_csv("blood.csv")
    x_train = data.drop(["a"], axis=1)
    y_train = data["a"]

    x_train = pd.get_dummies(x_train)
    target_map = dict()
    if y_train.dtype == "object":
        target_map = {val: i for (i, val) in enumerate(np.unique(y_train))}
        y_train = y_train.map(target_map)
        y_train = to_categorical(y_train)
    x_train = x_train.values

    x_train, x_test, y_train, y_test = train_test_split(x_train,
                                                        y_train,
                                                        test_size=0.2)

    mean_train = x_train.mean(axis=0)
    std_train = np.std(x_train, axis=0)

    x_train = (x_train - mean_train) / std_train
    x_test = (x_test - mean_train) / std_train

    mlp = MultiLayerPerceptron()
    mlp.add(Input(x_train.shape[1]))
    mlp.add(Dense(32, activation="relu"))
    mlp.add(Dense(2, activation="sigmoid"))
    mlp.build()
    mlp.fit(x_train,
            y_train,
            epoch=40,
            lr=0.05,
            validation_data=(x_test, y_test))
    mlp.draw()
    for loop in range(aveLoop):
        print "***",loop,"***"

        # Create the output files
        # precision
        preMat = open("./precision/preMat"+str(loop)+".csv", "w")
        preMat = csv.writer(preMat)
        # recall
        recMat = open("./recall/recMat"+str(loop)+".csv", "w")
        recMat = csv.writer(recMat)
        # fscore
        fscMat = open("./F-score/fscMat"+str(loop)+".csv", "w")
        fscMat = csv.writer(fscMat)

        # Build the multi-layer perceptron
        mlp = MultiLayerPerceptron(28*28, 1000, 10, act1="tanh", act2="softmax", preMat=preMat, recMat=recMat, fscMat=fscMat)

        #-------------------------------
        # Prepare the training and test data
        #-------------------------------
        # Prepare test data containing all digits
        # Split into training and test data (only the test data is used)
        X_train_all, X_test_all, y_train_all, y_test_all = train_test_split(X, y, test_size=0.1)


        # Get the indices excluding digit 4
        index_without4 = np.where((y != 4))
        # Get the target signals at those indices (digit 4 excluded)
        y_without4 = y[index_without4]
        # Get the inputs at those indices (digit 4 excluded)
        X_without4 = X[index_without4]
Example #24
import numpy as np
from mlp import MultiLayerPerceptron

mlp = MultiLayerPerceptron(
    numero_de_entradas=2, neuronios_por_camada=[2, 2],
    pesos=[[np.array([0.35, 0.15, 0.2]), np.array([0.35, 0.25, 0.3])],
           [np.array([0.6, 0.4, 0.45]), np.array([0.6, 0.5, 0.55])]],
    taxa_aprendizagem=0.5, epocas=2, precisao=1e-7, debug_training=True, plot=False
)

# mlp = MultiLayerPerceptron(
#     neuronios_por_camada=[2,2],
#     pesos=[[np.zeros(3), np.zeros(3)],
#            [np.zeros(3), np.zeros(3)]],
#     taxa_aprendizagem=0.5, desejados=np.array([0.01, 0.99]),
#     epocas=10, precisao=1e-7,
#     debug_training=True, plot=False
# )
# mlp = MultiLayerPerceptron(
#     neuronios_por_camada=[2,2],
#     taxa_aprendizagem=0.5, desejados=np.array([0.01, 0.99]),
#     epocas=1, precisao=1e-7
# )

mlp.treinar(matriz_entradas=np.array([[0.05, 0.1]]), valores_desejados=np.array([[0.01, 0.99]]))
Example #25
max_epochs = 500

factory = MLP_Factory()

input_train = np.zeros((150, 4))
target_train = np.zeros((150, 1))
factory.generate_sin_values(input_train, target_train)
input_test = np.zeros((50, 4))
target_test = np.zeros((50, 1))
factory.generate_sin_values(input_test, target_test)

output_train = np.zeros(target_train.shape)
output_test = np.zeros(target_test.shape)
options = factory.sin_finder(activation_type)
nn = MultiLayerPerceptron(options)

# Training
print("----------------------------------\nTrain\n----------------------------------")
for epoch in range(max_epochs):
    error = 0
    for index, value in enumerate(input_train):
        output_train[index] = nn.forward(value)
        error += nn.backward(target_train[index])
        nn.update_weights(learning_rate[activation_type])
    if epoch % 500 == 0:
        print('Epoch:\t{0}\tError:\t{1}'.format(epoch, error))
        learning_rate[activation_type] *= learning_rate_change[activation_type]

print('Training:\tAverage difference between target and output: {0}'.format(
    nn.average_miss(target_train, output_train)[0]))
    for loop in range(aveLoop):
        print "***", loop, "***"

        # Create the output files
        # precision
        preMat = open("./precision/preMat"+str(loop)+".csv", "w")
        preMat = csv.writer(preMat)
        # recall
        recMat = open("./recall/recMat"+str(loop)+".csv", "w")
        recMat = csv.writer(recMat)
        # fscore
        fscMat = open("./F-score/fscMat"+str(loop)+".csv", "w")
        fscMat = csv.writer(fscMat)

        # Build the multi-layer perceptron
        mlp = MultiLayerPerceptron(28*28, 1000, 10, act1="sigmoid", act2="softmax", preMat=preMat, recMat=recMat, fscMat=fscMat)

        #####################
        # All digits 0-9
        #####################

        # Split into training and test data
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)

        # Convert the target digits to 1-of-K (one-hot) encoding (all data)
        labels_train = LabelBinarizer().fit_transform(y_train)
        labels_test = LabelBinarizer().fit_transform(y_test)


Example #27
#fertility_output_balanced = fertility_df_output.tolist()

## Split the data into 70% training and 30% test
training_data, test_data, training_output, test_output = train_test_split(
    fertility_df_balanced,
    fertility_output_balanced,
    test_size=0.3,
    random_state=42)

## Train the MLP
quantidade_features = training_data.shape[1]

mlp = MultiLayerPerceptron(numero_de_entradas=quantidade_features,
                           neuronios_por_camada=[quantidade_features, 1],
                           taxa_aprendizagem=0.5,
                           epocas=1,
                           precisao=0,
                           debug_training=False,
                           plot=False)

mlp.treinar(matriz_entradas=training_data,
            valores_desejados=np.array(training_output))

print("Pesos da rede:")
for camada in mlp.camadas:
    for neuronio in camada.neuronios:
        print("Camada", camada.indice, ", neurônio", neuronio.indice, ":")
        print(mlp.pesos[camada.indice][neuronio.indice])

mlp.avaliar(saidas_esperadas=test_output, amostras_de_teste=test_data)
Example #29
[34]: male/???/female (1/0/-1)
[35]: education (?)
[36]: income (?)
"""
num_class = 3
feature_dim = 12
num_value = 10
data = np.load('data.npy')
neural_data = np.load('NN_data.npy')
labels = np.load('labels.npy')
#print("Bayes:")
#Bayes = NaiveBayes(num_class,feature_dim,num_value)
#Bayes.train(data, labels)
#guess = Bayes.guess(bayes_features)
#Bayes.test(data, labels)
#print(guess)
#print("Perceptron:")
#Perceptron = MultiClassPerceptron(num_class,feature_dim)
#Perceptron.train(data, labels)
#Perceptron.test(data, labels)
#print("Q Agent:")
#Reinforcement = Q_Agent(("rock", "paper", "scissors"), 5, 40, 0.7)
#Reinforcement.train(data, labels)
#Reinforcement.test(data, labels)
print("FFNN:")
MLP = MultiLayerPerceptron(20, len(neural_data[0]))
print("Training..")
MLP.train(neural_data, labels, True)
print("Testing..")
MLP.test(neural_data, labels)
    #   Then you can just call it like this, which should make thing run faster
    #train_loader, valid_loader = load_data( "../data/adult_preprocessed.csv", label_name, label_mapping,
    #                                        preprocess=False, batch_size=opts.batch_size, seed=opts.seed)

    # based on label_mapping variable, we specify some useful variables in opts
    if label_mapping is None:  # regression
        opts.classification_type = "regression"
        opts.total_correct = total_correct_regression
        opts.output_size = 1
    elif len(label_mapping) == 2:  # binary
        opts.classification_type = "binary"
        opts.total_correct = total_correct_binary
        opts.output_size = 1
    elif len(label_mapping) > 2:  # multiclass
        opts.classification_type = "mulitclass"
        opts.total_correct = total_correct_multiclass
        opts.output_size = len(label_mapping)
    else:
        raise ValueError("'label_mapping' needs to have more than 1 attribute")

    # creating model
    input_size = len(train_loader.dataset.data[0, :])
    model = MultiLayerPerceptron(input_size,
                                 num_hidden_layers=opts.num_hidden_layers,
                                 hidden_size=opts.hidden_size,
                                 actfunction=opts.actfunction,
                                 output_size=opts.output_size)

    # training model
    final_statistics = train(model, train_loader, valid_loader, opts)
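The total_correct_* helpers assigned to opts above are not defined in this fragment. A sketch of what the binary variant might look like, assuming raw logits and 0/1 labels (the name matches the reference above, but the body is an assumption):

import torch

def total_correct_binary(predictions, labels):
    # Threshold the sigmoid output at 0.5 and count matches with the labels
    predicted = (torch.sigmoid(predictions) > 0.5).long().squeeze()
    return (predicted == labels.long()).sum().item()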