Esempio n. 1
0
        'std recall': [],
        'best_cf': []
    }

    # Per-realization classification metrics accumulated over the experiment loop.
    results = dict(
        realization=[],   # index of the Monte-Carlo realization
        ACCURACY=[],
        AUC=[],
        MCC=[],
        f1_score=[],
        precision=[],
        recall=[],
        cf=[],            # confusion matrix per realization
    )

    # Load the synthetic LOGICAL_AND dataset; indexing below shows it is a
    # 2-D array whose columns 0-1 are features and column 2 is the binary label.
    base = load_mock(type='LOGICAL_AND')

    # Split feature rows by class label for visualization.
    pos = base[:, :2][where(base[:, 2] == 1)[0]]
    neg = base[:, :2][where(base[:, 2] == 0)[0]]
    plt.plot(pos[:, 0], pos[:, 1], 'bo')  # positive class: blue circles
    plt.plot(neg[:, 0], neg[:, 1], 'ro')  # negative class: red circles
    plt.show()

    # Candidate hyper-parameter values — presumably swept downstream; TODO confirm usage
    C = [0, 1]

    for realization in range(20):
        train, test = split_random(base, train_percentage=.8)

        x_train = train[:, :2]
        y_train = train[:, 2]
Esempio n. 2
0
        'best_cf': [],
        'alphas': []
    }

    # Per-realization metrics for the multi-class run.
    results = dict(
        realization=[],
        ACCURACY=[],
        # MCC=[],  # disabled in the original experiment
        f1_score=[],
        precision=[],
        recall=[],
        cf=[],
        alphas=[],  # learning-rate / alpha value used per realization
    )

    # Load the synthetic three-class "triangle" dataset (DataFrame with
    # columns x1, x2, y — see the column selections below).
    base = load_mock(type='TRIANGLE_CLASSES')
    # Normalize the features to [0, 1] via min-max scaling.
    base[['x1', 'x2']] = normalization(base[['x1', 'x2']], type='min-max')

    x = array(base[['x1', 'x2']])
    y = array(base[['y']])

    # Group samples by class label for the scatter plot.
    classe0 = x[np.where(y == 0)[0]]
    classe1 = x[np.where(y == 1)[0]]
    classe2 = x[np.where(y == 2)[0]]

    plt.plot(classe0[:, 0], classe0[:, 1], 'b^')  # class 0: blue triangles
    plt.plot(classe1[:, 0], classe1[:, 1], 'go')  # class 1: green circles
    plt.plot(classe2[:, 0], classe2[:, 1], 'm*')  # class 2: magenta stars
    plt.xlabel("X1")
    plt.ylabel("X2")
Esempio n. 3
0
        'best_cf': [],
        'alphas': []
    }

    # Per-realization metrics for the XOR run.
    results = dict(
        realization=[],
        ACCURACY=[],
        # MCC=[],  # disabled in the original experiment
        f1_score=[],
        precision=[],
        recall=[],
        cf=[],
        alphas=[],  # learning-rate / alpha value used per realization
    )

    # Wrap the raw XOR mock (array of [x1, x2, y] rows) in a DataFrame so the
    # column-based normalization below can be reused.
    base = pd.DataFrame(load_mock(type='LOGICAL_XOR'),
                        columns=['x1', 'x2', 'y'])
    # Normalize the features to [0, 1] via min-max scaling.
    base[['x1', 'x2']] = normalization(base[['x1', 'x2']], type='min-max')

    x = array(base[['x1', 'x2']])
    y = array(base[['y']])

    # Group samples by class label for the scatter plot.
    classe0 = x[np.where(y == 0)[0]]
    classe1 = x[np.where(y == 1)[0]]

    plt.plot(classe0[:, 0], classe0[:, 1], 'b^')  # class 0: blue triangles
    plt.plot(classe1[:, 0], classe1[:, 1], 'go')  # class 1: green circles
    plt.xlabel("X1")
    plt.ylabel("X2")
    # Persist the dataset figure under the project's results directory.
    plt.savefig(get_project_root() + '/run/TR-05/XOR/results/' +
                'dataset_xor_artificial.png')
Esempio n. 4
0
from mlfwk.visualization import generate_space, coloring

if __name__ == '__main__':
    print("run artificial seno")
    # Aggregated regression metrics (mean and std across realizations).
    final_result = {
        'MSE': [],
        'std MSE': [],
        'RMSE': [],
        'std RMSE': [],
        'R2': [],
        'std R2': []
    }

    # Per-realization regression metrics.
    results = {'realization': [], 'MSE': [], 'RMSE': [], 'R2': []}

    # Load the synthetic sine dataset (DataFrame with columns x1, y).
    base = load_mock(type='MOCK_SENO')
    # Normalize the input feature to [0, 1] via min-max scaling.
    base[['x1']] = normalization(base[['x1']], type='min-max')

    # Plot and persist the raw dataset before running the experiment.
    sn.set_style('whitegrid')
    sn.scatterplot(data=base, x="x1", y="y", color='c')
    plt.xlabel("X1")
    plt.ylabel("Y")
    plt.savefig(get_project_root() +
                '/run/TR-05/ARTIFICIAL_REGRESSAO/results/' +
                'dataset_seno_artificial.png')
    plt.show()

    x = array(base[['x1']])
    y = array(base[['y']])
Esempio n. 5
0
        'MSE': [],
        'std MSE': [],
        'RMSE': [],
        'std RMSE': [],
        'alphas': []
    }

    # Per-realization regression metrics ('erros' keeps the original key spelling).
    results = dict(
        realization=[],
        MSE=[],
        RMSE=[],
        erros=[],   # training-error history per realization
        alphas=[],  # learning-rate / alpha value used per realization
    )

    # Synthetic 2-D regression data: targets F over inputs (x1, x2); the plot
    # title below indicates the generating model y = a*x1 + b*x2 + c.
    F, x1, x2 = load_mock(type='2D_REGRESSOR')

    # 3-D scatter of the raw dataset, persisted to the results directory.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(x1, x2, F, color='k')
    ax.set_title("y = ax1 + bx2 + c")
    ax.set_xlabel("x1")
    ax.set_ylabel("x2")
    ax.set_zlabel("y")
    plt.savefig(get_project_root() + '/run/TR-02/ARTIFICIAL/results/' +
                'adaline_fig_2.jpg')
    plt.show()

    base = concatenate(
        [array(x1, ndmin=2),
         array(x2, ndmin=2),
Esempio n. 6
0
        'MSE': [],
        'std MSE': [],
        'RMSE': [],
        'std RMSE': [],
        'alphas': []
    }

    # Per-realization regression metrics ('erros' keeps the original key spelling).
    results = dict(
        realization=[],
        MSE=[],
        RMSE=[],
        erros=[],   # training-error history per realization
        alphas=[],  # learning-rate / alpha value used per realization
    )

    # Synthetic 1-D linear regression data: targets F over input x.
    F, x = load_mock(type='LINEAR_REGRESSOR')

    # Earlier plotting approach kept for reference (superseded by the
    # explicit fig/ax version below).
    # plt.plot(x, F, 'bo', color='k')
    # # plt.plot(array(x, ndmin=2).T, regressor_adaline.predict(array(x, ndmin=2).T))
    # plt.xlabel('x')
    # plt.ylabel('y')
    # plt.savefig(get_project_root() + '/run/TR-02/ARTIFICIAL/results/' + 'adaline_fig_1.jpg')
    # plt.show()

    # Reset then apply the ggplot style for the final figure.
    plt.style.use('default')
    plt.style.use('ggplot')

    fig, ax = plt.subplots(figsize=(8, 4))

    # ax.plot(array(x, ndmin=2).T, regressor_adaline.predict(array(x, ndmin=2).T), color='k', label='g(x)')
    ax.scatter(x,