Ejemplo n.º 1
0
                                 oDataSet.labels[oData.Training_indexes[train]], epochs)
            # Evaluate this CV fold: predict every held-out training sample.
            y_pred = []
            y_true = []
            for i in test:
                y_pred.append(mpl.predict(oDataSet.attributes[oData.Training_indexes[i]])[0, 0])
                y_true.append(oDataSet.labels[oData.Training_indexes[i]])
            # Record the fold MSE for the current grid candidate g1.
            grid_result[g1, k_slice] = mean_squared_error(y_true, y_pred)
            k_slice += 1
    print(grid_result)
    # Pick the hidden-layer size with the lowest mean cross-validation MSE,
    # then retrain a fresh MLP on the whole training split.
    best_p = GRID[np.argmin(np.mean(grid_result, axis=1))]
    mpl = multi_Layered_perceptron_linear(LEARNING_RATE, (oDataSet.attributes.shape[1], best_p, 1))
    mpl.train_regression(oDataSet.attributes[oData.Training_indexes],
                         oDataSet.labels[oData.Training_indexes], epochs)
    # Final evaluation on the held-out test indexes.
    y_pred = []
    y_true = []
    for i in oData.Testing_indexes:
        y_pred.append(mpl.predict(oDataSet.attributes[i])[0, 0])
        y_true.append(oDataSet.labels[i])
        # Scatter predictions (red) against ground truth (green) per sample.
        plt.scatter(oDataSet.attributes[i], y_pred[-1], color='red')
        plt.scatter(oDataSet.attributes[i], y_true[-1], color='green')
    plt.show()
    # Persist the trained model and the experiment hyper-parameters/metrics.
    oData.model = mpl
    oData.params = {"k_fold": K_FOLD, "GRID_RESULT": grid_result, "GRID_VALUES": GRID, "LEARNING RATE": LEARNING_RATE,
                    "EPOCHS": epochs, "MSE": mean_squared_error(y_true, y_pred),
                    "RMSE": np.sqrt(mean_squared_error(y_true, y_pred))}

    oDataSet.append(oData)
oExp.add_data_set(oDataSet,
                  description="  Experimento Artificial MLP 20 realizaçoes.".format())
oExp.save("Objects/EXP02_1_LP_20.gzip".format())
Ejemplo n.º 2
0
    oDataSet.add_sample_of_attribute(
        np.array(list(np.float32(y)) + [classes[x]]))
# Cast accumulated attributes to float and normalize before training.
oDataSet.attributes = oDataSet.attributes.astype(float)
oDataSet.normalize_data_set()
# 20 independent realizations: split, train, and evaluate each time.
for j in range(20):
    print(j)
    oData = Data(len(oDataSet.labelsNames), 31, samples=50)
    # Stratified 80/20 split driven by the per-class sample counts.
    oData.random_training_test_by_percent(
        np.unique(classes, return_counts=True)[1], 0.8)
    perc = Layered_perceptron_Logistic(learning_rate,
                                       len(oDataSet.labelsNames))
    perc.train(oDataSet.attributes[oData.Training_indexes],
               oDataSet.labels[oData.Training_indexes], epochs)
    oData.model = perc
    oData.confusion_matrix = np.zeros(
        (len(oDataSet.labelsNames), len(oDataSet.labelsNames)))
    for i in oData.Testing_indexes:
        # Prepend the bias input (-1) and shape as a column vector.
        data = np.matrix(np.hstack(([-1], oDataSet.attributes[i]))).T
        predicted = perc.predict(data)
        oData.confusion_matrix[int(oDataSet.labels[i]), predicted] += 1
    print(oData)
    oDataSet.append(oData)
oExp.add_data_set(
    oDataSet,
    description="  Experimento Dermatologia LP 20 realizaçoes.".format())
oExp.save("Objects/EXP01_4_LP_20.gzip".format())

# Reload the saved experiment to verify persistence, then print the
# aggregated confusion matrix across all realizations.
oExp = Experiment.load("Objects/EXP01_4_LP_20.gzip".format())
print(oExp)
print(oExp.experimentResults[0].sum_confusion_matrix)
Ejemplo n.º 3
0
    ax1 = fig.add_subplot(111)
    # ax2 = ax1.twiny()
    # Predict at the two extreme attribute values to draw the fitted line.
    p = [oDataSet.attributes[0], oDataSet.attributes[-1]]
    res = []
    for i in p:
        # Prepend the bias input (-1) and shape as a column vector.
        data = np.matrix(np.hstack(([-1], i))).T
        predict = perc.predict(data)[0, 0]
        res.append([i, predict])
    res = np.array(res)
    ax1.plot(base[[0, -1]], res[:, 1])

    # Reference line y = 2x + 3 evaluated at the endpoints of `base`.
    # NOTE(review): presumably the ground-truth generating function of the
    # synthetic data — confirm against the dataset construction upstream.
    p = [base[0], base[-1]]
    res = []
    for i in p:
        predict = 2 * i + 3
        res.append([i, predict])
    res = np.array(res)
    ax1.plot(res[:, 0], res[:, 1])

    plt.show()
    # `ert` comes from code above this excerpt; presumably the accumulated
    # squared error over the test set — TODO confirm upstream. Normalized
    # here into MSE and RMSE.
    oData.params = {
        "MSE": ert / oData.Testing_indexes.shape[0],
        "RMSE": np.sqrt(ert / oData.Testing_indexes.shape[0])
    }

    print(oData.params)
    oDataSet.append(oData)

oExp.add_data_set(oDataSet)
oExp.save("Objects/EXP01_DT1_20.gzip")
Ejemplo n.º 4
0
    model.fit(oDataSet.attributes[oData.Training_indexes],
              binarizer(oDataSet.labels[oData.Training_indexes]),
              batch_size=50,
              epochs=epochs,
              verbose=1)

    # Predicted class = arg-max over the model's output units.
    y_pred = model.predict(
        oDataSet.attributes[oData.Testing_indexes]).argmax(axis=1)
    y_true = oDataSet.labels[oData.Testing_indexes]

    print(accuracy_score(y_true, y_pred))
    print(confusion_matrix(y_true, y_pred))
    oData.confusion_matrix = confusion_matrix(y_true, y_pred)
    # Serialize the Keras model to bytes so it can be stored inside the
    # pickled experiment object along with the training loss history.
    model.save('model.h5')
    myArr = None
    with open("model.h5", "rb") as binaryfile:
        myArr = bytearray(binaryfile.read())
    oData.model = myArr, model.history.history['loss']
    oData.params = {
        "k_fold": K_FOLD,
        "GRID_RESULT": grid_result,
        "GRID_VALUES_NEURON": GRID_NEURON,
        "GRID_VALUES_BETA": GRID_B,
        "LEARNING RATE": LEARNING_RATE,
        "EPOCHS": epochs
    }
    oDataSet.append(oData)
    print(oData)
oExp.add_data_set(
    oDataSet, description="  Experimento cancer LP 20 realizaçoes.".format())
oExp.save("Objects/EXP01_5_LP_20.gzip".format())
Ejemplo n.º 5
0
# Experiment: logistic layered perceptron on the Vertebral Column (3-class)
# dataset — 20 independent realizations, 30000 epochs each.
COLOR = cm.rainbow(np.linspace(0, 1, 5))
learning_rate = 0.10
epochs = 30000

oExp = Experiment()

oDataSet = DataSet()
# Six numeric attributes per row; the last column is the class label (text).
base = np.loadtxt("Datasets/column_3C.dat", usecols=range(6), delimiter=" ")
classes = np.loadtxt("Datasets/column_3C.dat", dtype=object, usecols=-1, delimiter=" ")

for x, y in enumerate(base):
    oDataSet.add_sample_of_attribute(np.array(list(np.float32(y)) + [classes[x]]))
oDataSet.attributes = oDataSet.attributes.astype(float)
oDataSet.normalize_data_set()
oExp.add_data_set(oDataSet,
                  description="  Experimento COLUNA 3C LP 20 realizaçoes. com 30000 epocas")
for j in range(20):
    print(j)
    oData = Data(len(oDataSet.labelsNames), 31, samples=50)
    # Stratified 80/20 split using the known per-class counts (60/150/100).
    oData.random_training_test_by_percent([60, 150, 100], 0.8)
    perc = Layered_perceptron_Logistic(learning_rate, len(oDataSet.labelsNames))
    perc.train(oDataSet.attributes[oData.Training_indexes], oDataSet.labels[oData.Training_indexes], epochs)
    oData.model = perc
    oData.confusion_matrix = np.zeros((len(oDataSet.labelsNames), len(oDataSet.labelsNames)))
    for i in oData.Testing_indexes:
        # Prepend the bias input (-1) and shape as a column vector.
        data = np.matrix(np.hstack(([-1], oDataSet.attributes[i]))).T
        predicted = perc.predict(data)
        oData.confusion_matrix[int(oDataSet.labels[i]), predicted] += 1
    oDataSet.append(oData)
    # Checkpoint after every realization so partial progress survives a crash.
    oExp.experimentResults[0] = oDataSet
    oExp.save("Objects/EXP02_3_LP_20.gzip")
Ejemplo n.º 6
0
# Experiment: simple perceptron on dataset2 (two numeric attributes, binary
# class in the last column) — 20 independent realizations.
oDataSet = DataSet()
base = np.loadtxt("Datasets/dataset2.txt", usecols=range(2), delimiter=",")
classes = np.loadtxt("Datasets/dataset2.txt", usecols=-1, delimiter=",")

for x, y in enumerate(base):
    oDataSet.add_sample_of_attribute(
        np.array(list(np.float32(y)) + [classes[x]]))
oDataSet.attributes = oDataSet.attributes.astype(float)
# oDataSet.normalize_data_set()
for j in range(20):
    print(j)
    oData = Data(2, 31, samples=50)
    # Stratified 80/20 split: 600 samples per class.
    oData.random_training_test_by_percent([600, 600], 0.8)
    perc = Perceptron(learning_rate)
    perc.train(oDataSet.attributes[oData.Training_indexes],
               oDataSet.labels[oData.Training_indexes], epochs)
    oData.model = perc
    oData.confusion_matrix = np.zeros((2, 2))
    for i in oData.Testing_indexes:
        # Prepend the bias input (-1) and shape as a column vector.
        data = np.matrix(np.hstack(([-1], oDataSet.attributes[i]))).T
        oData.confusion_matrix[int(oDataSet.labels[i]),
                               perc.predict(data)] += 1
    oDataSet.append(oData)
oExp.add_data_set(
    oDataSet, description="  Experimento dataset2 PS 20 realizaçoes.")
oExp.save("Objects/EXP02_PS_20.gzip")

# Reload the saved experiment to verify persistence, then print the
# aggregated confusion matrix across all realizations.
oExp = Experiment.load("Objects/EXP02_PS_20.gzip")
print(oExp)
print(oExp.experimentResults[0].sum_confusion_matrix)
Ejemplo n.º 7
0
# Experiment: logistic layered perceptron on Iris restricted to 2 attributes —
# 20 independent realizations.
for x, y in enumerate(base):
    oDataSet.add_sample_of_attribute(
        np.array(list(np.float32(y)) + [classes[x]]))
oDataSet.attributes = oDataSet.attributes.astype(float)
# oDataSet.normalize_data_set()
for j in range(20):
    print(j)
    oData = Data(len(oDataSet.labelsNames), 31, samples=50)
    # Stratified 80/20 split: 50 samples per class.
    oData.random_training_test_by_percent([50, 50, 50], 0.8)
    perc = Layered_perceptron_Logistic(learning_rate,
                                       len(oDataSet.labelsNames))
    perc.train(oDataSet.attributes[oData.Training_indexes],
               oDataSet.labels[oData.Training_indexes], epochs)
    oData.model = perc
    oData.confusion_matrix = np.zeros(
        (len(oDataSet.labelsNames), len(oDataSet.labelsNames)))
    for i in oData.Testing_indexes:
        # Prepend the bias input (-1) and shape as a column vector.
        data = np.matrix(np.hstack(([-1], oDataSet.attributes[i]))).T
        predicted = perc.predict(data)
        oData.confusion_matrix[int(oDataSet.labels[i]), predicted] += 1
    print(oData)
    oDataSet.append(oData)
oExp.add_data_set(
    oDataSet,
    description="  Experimento Iris LP 20 realizaçoes. 2 atributos")
oExp.save("Objects/EXP02_2_LP_20.gzip")

# Reload the saved experiment to verify persistence, then print the
# aggregated confusion matrix across all realizations.
oExp = Experiment.load("Objects/EXP02_2_LP_20.gzip")
print(oExp)
print(oExp.experimentResults[0].sum_confusion_matrix)
Ejemplo n.º 8
0
            grid_result[g1, k_slice] = mean_squared_error(y_true, y_pred)
            k_slice += 1
            print(k_slice)
    print(grid_result, j)
    # Pick the hidden-layer size with the lowest mean cross-validation MSE,
    # then retrain a fresh MLP on the whole training split.
    best_p = GRID[np.argmin(np.mean(grid_result, axis=1))]
    mpl = multi_Layered_perceptron_linear(
        LEARNING_RATE, (oDataSet.attributes.shape[1], best_p, 1))
    mpl.train_regression(oDataSet.attributes[oData.Training_indexes],
                         oDataSet.labels[oData.Training_indexes], epochs)
    # Final evaluation on the held-out test indexes.
    y_pred = []
    y_true = []
    for i in oData.Testing_indexes:
        y_pred.append(mpl.predict(oDataSet.attributes[i])[0, 0])
        y_true.append(oDataSet.labels[i])
    # Persist the trained model and the experiment hyper-parameters/metrics.
    oData.model = mpl
    oData.params = {
        "k_fold": K_FOLD,
        "GRID_RESULT": grid_result,
        "GRID_VALUES": GRID,
        "LEARNING RATE": LEARNING_RATE,
        "EPOCHS": epochs,
        "MSE": mean_squared_error(y_true, y_pred),
        "RMSE": np.sqrt(mean_squared_error(y_true, y_pred))
    }

    oDataSet.append(oData)
oExp.add_data_set(
    oDataSet,
    description="  Experimento Temperatura rotor MLP 20 realizaçoes.".format())
oExp.save("Objects/EXP02_4_LP_20.gzip".format())
Ejemplo n.º 9
0
    model.compile(loss='mse', optimizer=_OPTIMIZER)
    model.fit(oDataSet.attributes[oData.Training_indexes],
              oDataSet.labels[oData.Training_indexes],
              batch_size=50,
              epochs=epochs,
              verbose=1)

    # Final evaluation of the trained regressor on the held-out test split.
    y_pred = model.predict(oDataSet.attributes[oData.Testing_indexes])
    y_true = oDataSet.labels[oData.Testing_indexes]

    # Serialize the Keras model to bytes so it can be stored inside the
    # pickled experiment object along with the training loss history.
    model.save('model.h5')
    myArr = None
    with open("model.h5", "rb") as binaryfile:
        myArr = bytearray(binaryfile.read())
    oData.model = myArr, model.history.history['loss']
    oData.params = {
        "k_fold": K_FOLD,
        "GRID_RESULT": grid_result,
        "GRID_VALUES": (best_b, best_p),
        "LEARNING RATE": LEARNING_RATE,
        "EPOCHS": epochs,
        "MSE": mean_squared_error(y_true, y_pred),
        "RMSE": np.sqrt(mean_squared_error(y_true, y_pred))
    }

    oDataSet.append(oData)
oExp.add_data_set(
    oDataSet,
    description="  Experimento Consumo Gasolina MLP 20 realizaçoes.".format())
oExp.save("Objects/EXP02_3_LP_20.gzip".format())