Example #1
    # Stack the RBF layer with a sigmoid output layer, one unit per class.
    model.add(rbflayer)
    model.add(Dense(len(oDataSet.labelsNames), activation='sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer=_OPTIMIZER)
    model.fit(oDataSet.attributes[oData.Training_indexes],
              binarizer(oDataSet.labels[oData.Training_indexes]),
              batch_size=50,
              epochs=epochs,
              verbose=1)

    # Evaluate on the held-out test split.
    y_pred = model.predict(
        oDataSet.attributes[oData.Testing_indexes]).argmax(axis=1)
    y_true = oDataSet.labels[oData.Testing_indexes]

    print(accuracy_score(y_true, y_pred))
    print(confusion_matrix(y_true, y_pred))
    oData.confusion_matrix = confusion_matrix(y_true, y_pred)

    # Persist the trained model: save to HDF5, then keep the raw file
    # bytes together with the training loss history.
    model.save('model.h5')
    with open("model.h5", "rb") as binaryfile:
        myArr = bytearray(binaryfile.read())
    oData.model = myArr, model.history.history['loss']
    oData.params = {
        "k_fold": K_FOLD,
        "GRID_RESULT": grid_result,
        "GRID_VALUES_NEURON": GRID_NEURON,
        "GRID_VALUES_BETA": GRID_B,
        "LEARNING_RATE": LEARNING_RATE,
        "EPOCHS": epochs
    }
    oDataSet.append(oData)
    print(oData)
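
The model is persisted as the raw bytes of its saved HDF5 file. A minimal sketch of how those bytes could be restored later, assuming Keras is available and oData.model holds the (bytes, loss_history) tuple built above; the helper name and the round-trip through a temporary file are illustrative assumptions, not part of the original code:

import os
import tempfile
from keras.models import load_model

def restore_model(stored):
    # Hypothetical helper: rebuild a Keras model from the
    # (model_bytes, loss_history) tuple stored in oData.model above.
    model_bytes, loss_history = stored
    fd, path = tempfile.mkstemp(suffix=".h5")
    try:
        # Keras loads from a file path, so write the bytes back to disk.
        with os.fdopen(fd, "wb") as f:
            f.write(model_bytes)
        # Custom layers (e.g. the RBF layer) would need to be supplied
        # via load_model's custom_objects argument.
        return load_model(path), loss_history
    finally:
        os.remove(path)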
Example #2
# Build the data set: per sample, the attribute vector plus its class label.
for x, y in enumerate(base):
    oDataSet.add_sample_of_attribute(
        np.array(list(np.float32(y)) + [classes[x]]))
oDataSet.attributes = oDataSet.attributes.astype(float)
oDataSet.normalize_data_set()

# 20 independent runs, each with a fresh stratified 80/20 train/test split.
for j in range(20):
    print(j)
    oData = Data(len(oDataSet.labelsNames), 31, samples=50)
    oData.random_training_test_by_percent(
        np.unique(classes, return_counts=True)[1], 0.8)
    perc = Layered_perceptron_Logistic(learning_rate,
                                       len(oDataSet.labelsNames))
    perc.train(oDataSet.attributes[oData.Training_indexes],
               oDataSet.labels[oData.Training_indexes], epochs)
    oData.model = perc

    # Fill the confusion matrix on the test split; a constant -1 bias
    # input is prepended to each attribute vector before predicting.
    oData.confusion_matrix = np.zeros(
        (len(oDataSet.labelsNames), len(oDataSet.labelsNames)))
    for i in oData.Testing_indexes:
        data = np.matrix(np.hstack(([-1], oDataSet.attributes[i]))).T
        predicted = perc.predict(data)
        oData.confusion_matrix[int(oDataSet.labels[i]), predicted] += 1
    print(oData)
    oDataSet.append(oData)
oExp.add_data_set(
    oDataSet,
    description="Dermatology experiment, LP, 20 runs.")
oExp.save("Objects/EXP01_4_LP_20.gzip")

oExp = Experiment.load("Objects/EXP01_4_LP_20.gzip")
print(oExp)
print(oExp.experimentResults[0].sum_confusion_matrix)
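
Because every run stores its confusion matrix (rows = true class, columns = predicted class), aggregate accuracy can be recovered from the summed matrix printed above. A minimal sketch, assuming numpy is imported as in the example:

cm = oExp.experimentResults[0].sum_confusion_matrix
# Diagonal entries count correct predictions, so trace / total = accuracy.
accuracy = np.trace(cm) / cm.sum()
print("aggregate accuracy over the 20 runs: %.4f" % accuracy)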
Example #3
# (truncated: the arguments below are the tail of an np.loadtxt-style call
#  that reads the class column from a CSV file whose path is not shown)
                     dtype=object,
                     usecols=-1,
                     delimiter=",")

# Build the data set: per sample, the attribute vector plus its class label.
for x, y in enumerate(base):
    oDataSet.add_sample_of_attribute(
        np.array(list(np.float32(y)) + [classes[x]]))
oDataSet.attributes = oDataSet.attributes.astype(float)
oDataSet.normalize_data_set()

# 20 independent runs of a simple two-class perceptron; the split keeps
# 80% of the 100/50 samples per class for training.
for j in range(20):
    print(j)
    oData = Data(2, 31, samples=50)
    oData.random_training_test_by_percent([100, 50], 0.8)
    perc = Perceptron(learning_rate)
    perc.train(oDataSet.attributes[oData.Training_indexes],
               oDataSet.labels[oData.Training_indexes], epochs)
    oData.model = perc

    # Fill the 2x2 confusion matrix on the test split, prepending the
    # constant -1 bias input before each prediction.
    oData.confusion_matrix = np.zeros((2, 2))
    for i in oData.Testing_indexes:
        data = np.matrix(np.hstack(([-1], oDataSet.attributes[i]))).T
        oData.confusion_matrix[int(oDataSet.labels[i]),
                               perc.predict(data)] += 1
    oDataSet.append(oData)
oExp.add_data_set(
    oDataSet, description="Iris experiment, PS, 20 runs.")
oExp.save("Objects/EXP01_3_PS_20.gzip")

oExp = Experiment.load("Objects/EXP01_3_PS_20.gzip")
print(oExp)
print(oExp.experimentResults[0].sum_confusion_matrix)
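
Both perceptron examples fold the bias into the weight vector by prepending a constant -1 input before calling predict. A minimal sketch of what that predict step might look like; the real Perceptron class is not shown here, so the class name, weight layout, and step threshold below are assumptions:

import numpy as np

class PerceptronSketch:
    # Illustrative stand-in, not the original class: w[0] pairs with
    # the -1 bias input prepended to each attribute vector above.
    def __init__(self, n_inputs):
        self.w = np.zeros(n_inputs + 1)

    def predict(self, data):
        # data arrives as a column vector [[-1], [x1], ..., [xn]].
        activation = float(self.w @ np.asarray(data).ravel())
        return 1 if activation >= 0 else 0  # hard-threshold step output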