Example #1
0
oExp = Experiment()

# Single-feature data set; the class label sits in the last CSV column.
oDataSet = DataSet()
base = np.loadtxt("Datasets/artifitial1.data", usecols=range(1), delimiter=",")
classes = np.loadtxt("Datasets/artifitial1.data", usecols=-1, delimiter=",")


# Feed every (feature, label) pair into the data set container.
for idx, value in enumerate(base):
    sample = [np.float32(value), classes[idx]]
    oDataSet.add_sample_of_attribute(np.array(sample))
oDataSet.attributes = oDataSet.attributes.astype(float)
oDataSet.normalize_data_set()
# Labels as an (n, 1) column vector.
oDataSet.labels = np.array([classes]).T

for j in range(20):
    slices = KFold(n_splits=K_FOLD)
    oData = Data(1, 31, samples=50)
    indices = np.arange(oDataSet.attributes.shape[0])
    np.random.shuffle(indices)
    oData.Testing_indexes = indices[int(oDataSet.attributes.shape[0] * 0.85):]
    oData.Training_indexes = indices[:int(oDataSet.attributes.shape[0] * 0.85)]

    grid_result = np.zeros((len(GRID), K_FOLD))
    for g1, g_param in enumerate(GRID):
        k_slice = 0
        for train, test in slices.split(oData.Training_indexes):
            mpl = multi_Layered_perceptron_linear(LEARNING_RATE, (oDataSet.attributes.shape[1], g_param, 1))
            mpl.train_regression(oDataSet.attributes[oData.Training_indexes[train]],
                                 oDataSet.labels[oData.Training_indexes[train]], epochs)
            y_pred = []
            y_true = []
            for i in test:
Example #2
0
                  usecols=range(34),
                  dtype=int,
                  delimiter=",")
# Integer class labels live in the final column of the dermatology file.
classes = np.loadtxt("Datasets/dermatology.data",
                     dtype=int,
                     usecols=-1,
                     delimiter=",")

# Append each feature row plus its label to the data set, then normalize.
for idx, row in enumerate(base):
    combined = list(np.float32(row)) + [classes[idx]]
    oDataSet.add_sample_of_attribute(np.array(combined))
oDataSet.attributes = oDataSet.attributes.astype(float)
oDataSet.normalize_data_set()
# 20 independent realizations: stratified 80/20 split, train the logistic
# perceptron, then accumulate a confusion matrix over the held-out samples.
for j in range(20):
    print(j)
    oData = Data(len(oDataSet.labelsNames), 31, samples=50)
    class_counts = np.unique(classes, return_counts=True)[1]
    oData.random_training_test_by_percent(class_counts, 0.8)
    perc = Layered_perceptron_Logistic(learning_rate,
                                       len(oDataSet.labelsNames))
    perc.train(oDataSet.attributes[oData.Training_indexes],
               oDataSet.labels[oData.Training_indexes], epochs)
    oData.model = perc
    n_classes = len(oDataSet.labelsNames)
    oData.confusion_matrix = np.zeros((n_classes, n_classes))
    for idx in oData.Testing_indexes:
        # Prepend the -1 bias input before prediction.
        sample = np.matrix(np.hstack(([-1], oDataSet.attributes[idx]))).T
        predicted = perc.predict(sample)
        oData.confusion_matrix[int(oDataSet.labels[idx]), predicted] += 1
    print(oData)
    oDataSet.append(oData)
Example #3
0
# Hyper-parameters for the layered-perceptron experiment.
learning_rate = 0.01
epochs = 5000

oExp = Experiment()

# Artificial data set: two feature columns, integer class label last.
oDataSet = DataSet()
base = np.loadtxt("Datasets/artifitial1.data", usecols=range(2), delimiter=",")
classes = np.loadtxt("Datasets/artifitial1.data", dtype=int, usecols=-1, delimiter=",")

for x, y in enumerate(base):
    oDataSet.add_sample_of_attribute(np.array(list(np.float32(y)) + [classes[x]]))
oDataSet.attributes = oDataSet.attributes.astype(float)
# oDataSet.normalize_data_set()
# 20 realizations: 80/20 split (50 samples per class), train, then build
# a confusion matrix over the held-out indices.
for j in range(20):
    print(j)
    oData = Data(len(oDataSet.labelsNames), 31, samples=50)
    oData.random_training_test_by_percent([50, 50, 50], 0.8)
    perc = Layered_perceptron(learning_rate, len(oDataSet.labelsNames))
    perc.train(oDataSet.attributes[oData.Training_indexes], oDataSet.labels[oData.Training_indexes], epochs)
    oData.model = perc
    oData.confusion_matrix = np.zeros((len(oDataSet.labelsNames), len(oDataSet.labelsNames)))
    for i in oData.Testing_indexes:
        # Prepend the -1 bias input before prediction.
        data = np.matrix(np.hstack(([-1], oDataSet.attributes[i]))).T
        predicted = perc.predict(data)
        oData.confusion_matrix[int(oDataSet.labels[i]), predicted] += 1
    print(oData)
    oDataSet.append(oData)
# Fix: the original called str.format() with no arguments on these two
# literals — a no-op that only obscures the code; the calls are removed.
oExp.add_data_set(oDataSet,
                  description="  Experimento Artificial LP 20 realizaƧoes.")
oExp.save("Objects/EXP01_1_LP_20.gzip")
Example #4
0
                  dtype=int,
                  delimiter=",")
# Integer class labels are the last column of the breast-cancer file.
classes = np.loadtxt("Datasets/breast-cancer-wisconsin.data",
                     dtype=int,
                     usecols=-1,
                     delimiter=",")

# Append each feature row plus its label to the data set, then normalize.
for idx, row in enumerate(base):
    combined = list(np.float32(row)) + [classes[idx]]
    oDataSet.add_sample_of_attribute(np.array(combined))
oDataSet.attributes = oDataSet.attributes.astype(float)
oDataSet.normalize_data_set()

for j in range(10):
    slices = KFold(n_splits=K_FOLD, shuffle=True)
    oData = Data(len(oDataSet.labelsNames), 31, samples=50)
    oData.random_training_test_by_percent(
        np.unique(classes, return_counts=True)[1], 0.8)
    grid_result = np.zeros((len(GRID_NEURON), len(GRID_B), K_FOLD))
    for g1, g_param in enumerate(GRID_NEURON):
        for g2, g2_param in enumerate(GRID_B):
            k_slice = 0
            for train, test in slices.split(oData.Training_indexes):
                K.clear_session()

                model = Sequential()
                rbflayer = RBFLayer(
                    g_param,
                    initializer=InitCentersRandom(
                        oDataSet.attributes[oData.Training_indexes[train]]),
                    betas=g2_param,
Example #5
0
# Iris subset: four numeric features, string class label in the last column.
oDataSet = DataSet()
base = np.loadtxt("Datasets/iris_3.data", usecols=range(4), delimiter=",")
classes = np.loadtxt("Datasets/iris_3.data",
                     dtype=object,
                     usecols=-1,
                     delimiter=",")

for x, y in enumerate(base):
    oDataSet.add_sample_of_attribute(
        np.array(list(np.float32(y)) + [classes[x]]))
oDataSet.attributes = oDataSet.attributes.astype(float)
oDataSet.normalize_data_set()
# 20 realizations of a simple perceptron on an 80/20 split.
for j in range(20):
    print(j)
    oData = Data(2, 31, samples=50)
    oData.random_training_test_by_percent([100, 50], 0.8)
    perc = Perceptron(learning_rate)
    perc.train(oDataSet.attributes[oData.Training_indexes],
               oDataSet.labels[oData.Training_indexes], epochs)
    oData.model = perc
    oData.confusion_matrix = np.zeros((2, 2))
    for i in oData.Testing_indexes:
        # Prepend the -1 bias input before prediction.
        data = np.matrix(np.hstack(([-1], oDataSet.attributes[i]))).T
        oData.confusion_matrix[int(oDataSet.labels[i]),
                               perc.predict(data)] += 1
    oDataSet.append(oData)
# Fix: removed no-op str.format() calls that took no arguments.
oExp.add_data_set(
    oDataSet, description="  Experimento iris  PS 20 realizaƧoes.")
oExp.save("Objects/EXP01_3_PS_20.gzip")
Example #6
0
oExp = Experiment()

# Single-feature, space-delimited data; the target is the last column.
oDataSet = DataSet()
base = np.loadtxt("Datasets/dt_1.txt", usecols=range(1), delimiter=" ")
classes = np.loadtxt("Datasets/dt_1.txt", usecols=-1, delimiter=" ")

# Feed every (feature, target) pair into the data set container.
for idx, value in enumerate(base):
    sample = [np.float32(value), classes[idx]]
    oDataSet.add_sample_of_attribute(np.array(sample))
oDataSet.attributes = oDataSet.attributes.astype(float)
oDataSet.normalize_data_set()
# Targets reshaped to an (n, 1) column vector.
classes = np.array([classes]).T
# 20 realizations of an Adaline model on a shuffled 85/15 split.
for j in range(20):
    print(j)
    oData = Data(2, 31, samples=50)
    indices = np.arange(oDataSet.attributes.shape[0])
    np.random.shuffle(indices)
    # Last 15% of the shuffled indices are held out for testing,
    # the first 85% are used for training.
    oData.Testing_indexes = indices[int(oDataSet.attributes.shape[0] * 0.85):]
    oData.Training_indexes = indices[:int(oDataSet.attributes.shape[0] * 0.85)]

    perc = Perceptron_Adaline(learning_rate)
    # Defensive .copy() — presumably so train() cannot mutate the shared
    # `classes` array; confirm against Perceptron_Adaline.train.
    perc.train(oDataSet.attributes[oData.Training_indexes],
               classes[oData.Training_indexes].copy(), epochs)
    oData.model = perc
    ert = 0  # NOTE(review): set but never used in the visible code — the fragment appears truncated
    plotar = []
    for i in oData.Testing_indexes:
        # Prepend the -1 bias input expected by the model.
        data = np.matrix(np.hstack(([-1], oDataSet.attributes[i]))).T
        predict = perc.predict(data)[0, 0]
        # Collect (target, prediction) pairs; their consumer is not visible here.
        plotar.append([classes[i, 0], predict])
Example #7
0
# Append each (feature, label) pair to the data set, then normalize.
for idx, value in enumerate(base):
    sample = [np.float32(value), classes[idx]]
    oDataSet.add_sample_of_attribute(np.array(sample))
oDataSet.attributes = oDataSet.attributes.astype(float)
oDataSet.normalize_data_set()
# Labels as an (n, 1) column vector.
oDataSet.labels = np.array([classes]).T


for j in range(20):
    experiment = Experiment(api_key="9F7edG4BHTWFJJetI2XctSUzM",
                            project_name="mest-rn-t6-artifitial1",
                            workspace="lukkascost",
                            )
    experiment.set_name("REALIZACAO_{:02d}".format(j + 1))
    slices = KFold(n_splits=K_FOLD, shuffle=True)
    oData = Data(1, 31, samples=50)

    indices = np.arange(oDataSet.attributes.shape[0])
    np.random.shuffle(indices)
    oData.Testing_indexes = indices[int(oDataSet.attributes.shape[0] * 0.85):]
    oData.Training_indexes = indices[:int(oDataSet.attributes.shape[0] * 0.85)]

    grid_result = np.zeros((len(GRID_NEURON), len(GRID_B), K_FOLD))
    for g1, g_param in enumerate(GRID_NEURON):
        for g2, g2_param in enumerate(GRID_B):
            k_slice = 0
            for train, test in slices.split(oData.Training_indexes):
                model = Sequential()
                rbflayer = RBFLayer(g_param,
                                    initializer=InitCentersRandom(oDataSet.attributes[oData.Training_indexes[train]]),
                                    betas=g2_param,