Example #1
    def setUp(self) -> None:
        """
        Sets up the unittest fixtures
        """
        self.dataset = import_data(DATASET)
        # labels live in the last row of the (features x samples) array
        self.classes = np.unique(self.dataset[-1])
        maximum = 0
        # the total number of samples is an upper bound for the smallest class count
        minimum = self.dataset.shape[-1]
        for a_class in self.classes:
            count = (self.dataset[-1] == a_class).sum()
            maximum = max(maximum, count)
            minimum = min(minimum, count)
        self.max_representation = maximum
        self.min_representation = minimum
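
For comparison, the same per-class counts can come straight from np.unique with return_counts=True; a minimal sketch, assuming the same layout with labels in the last row:

import numpy as np

# stand-in for dataset[-1]: the label row of a (features x samples) array
label_row = np.array(["a", "a", "b", "c", "c", "c"])

# counts[i] is the number of samples belonging to classes[i]
classes, counts = np.unique(label_row, return_counts=True)
max_representation = counts.max()  # size of the most represented class
min_representation = counts.min()  # size of the least represented class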
Example #2
    args = parser.parse_args()

    # Initialize network: 4 inputs (iris features), one hidden layer of 6
    # neurons, 3 output classes
    network = NeuralNetwork(4, [6], 3, [tanh, sigmoid], LR)
    filename = "network"
    type_net = "Neural"
    k_fold = ""

    if args.normalize:
        network = NormalizedNetwork(4, [6], 3, [tanh, sigmoid], LR)
        type_net = "Normalized"
        filename = type_net.lower()

    # iris dataset
    dataset = import_data("../../data/iris.data")

    labels, encoding = one_hot_encoding(dataset[-1])
    classes = list(encoding.keys())
    # drop the label row, keeping only the feature rows
    dataset = dataset[0:-1]

    # Define Trainer
    trainer = StandardTrainer(dataset, labels.T, TRAIN_SIZE)
    k = 1

    if args.cross_validation is not None:
        k = args.cross_validation
        k_fold = "_{}fold".format(k)
        trainer = KFoldTrainer(k, 2, dataset, labels.T)

    fig = plt.figure(figsize=FIG_SIZE)
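
The parser itself is not shown; a plausible sketch of the argparse setup implied by the attribute accesses above (the exact option names are assumptions):

import argparse

# Hypothetical parser matching the args.normalize / args.cross_validation
# accesses in this example; option names are inferred, not from the source
parser = argparse.ArgumentParser(description="Train a network on the iris dataset")
parser.add_argument("--normalize", action="store_true",
                    help="use NormalizedNetwork instead of NeuralNetwork")
parser.add_argument("--cross_validation", type=int, default=None, metavar="K",
                    help="train with k-fold cross-validation")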
Example #3
    def test_import_data(self):
        dataset_path = path.abspath("{}/../../data/iris.data".format(
            self.file_path))
        dataset_iris = import_data(dataset_path)
        assert isinstance(dataset_iris, np.ndarray)
        # iris: 4 feature rows plus 1 label row
        assert dataset_iris.shape[0] == 5
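
The assertion on shape[0] == 5 suggests import_data returns the transposed table (4 iris features plus the label row). A minimal sketch of that presumed behavior using pandas; import_data_sketch is a hypothetical stand-in, not the project's actual implementation:

import numpy as np
import pandas as pd

def import_data_sketch(file_path: str, sep: str = ",", header=None) -> np.ndarray:
    # read the CSV and transpose it, so features end up in rows and samples
    # in columns, matching the shape[0] == 5 assertion for iris
    frame = pd.read_csv(file_path, sep=sep, header=header)
    return frame.values.T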
Example #4
def train_evaluate(architecture: dict, dataset_name: str) -> NeuralNetwork:
    """
    Train and evaluate a Network

    :param architecture: Architecture of NeuralNetwork (above)
    :param dataset_name: Dataset to use
    :return: Trained Neural Network
    """
    # import dataset
    dataset = import_data("../data/{}.data".format(dataset_name))

    # balance class representation by oversampling
    dataset = oversample(dataset)
    more_info = "(oversample)"

    labels, encoding = one_hot_encoding(dataset[-1])
    classes = list(encoding.keys())
    # drop the label row (last row), leaving only features
    dataset = np.delete(dataset, -1, 0)

    # also drop the first feature row
    dataset = np.delete(dataset, [0], 0)

    # Initialize network
    logging.info("Input size: {}\tOutput size: {}".format(
        dataset.shape[0], len(encoding)))
    network = NeuralNetwork(dataset.shape[0], architecture["INTERNAL_LAYERS"],
                            len(encoding), architecture["ACT_FUNCS"],
                            architecture["LR"])

    # Define Trainer
    trainer = StandardTrainer(dataset, labels.T, TRAIN_SIZE)

    fig = plt.figure(figsize=FIG_SIZE)
    fig.subplots_adjust(wspace=0.3)
    ax = fig.add_subplot(121)
    ax2 = ax.twinx()
    ax3 = fig.add_subplot(122)

    trained, (learn, costs) = trainer.train(network,
                                            epochs=EPOCHS,
                                            repeat=True)

    prediction = trainer.evaluate(trained)

    c_m = confusion_matrix(prediction, trainer.get_labels())

    line = ax.plot(learn, label="Learning Curve", linewidth=2.5, c="b")

    line2 = ax2.plot(costs, label="MSE", linestyle="--", linewidth=2.5, c="r")
    lines = line + line2

    ax.set_ylabel("Learning Curve", fontsize=FONT_SIZE)
    ax.set_xlabel("Epochs", fontsize=FONT_SIZE)
    ax.set_title("Network on {}\n".format(dataset_name), fontsize=TITLE_SIZE)
    ax.grid()

    ax2.set_ylabel("Cost", fontsize=FONT_SIZE)
    ax2.grid()

    # avoid shadowing the one-hot labels defined above
    legend_labels = [a_line.get_label() for a_line in lines]
    ax2.legend(lines, legend_labels, fontsize=FONT_SIZE, loc="center right")

    show_matrix(
        ax3, c_m,
        (classes, ["Predicted\n{}".format(a_class) for a_class in classes]),
        "Confusion Matrix of Test Set\n", FONT_SIZE, TITLE_SIZE)

    measures = {
        "accuracy": accuracy(c_m),
        "precision": precision(c_m),
        "recall": recall(c_m),
        "f1_score": f1_score(c_m)
    }

    print("Summary on {}:\n".format(dataset))
    report_results(c_m)

    plt.savefig("../results/Network_on_{}{}.png".format(
        dataset_name, more_info))

    return trained
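
An example call, assuming tanh and sigmoid are the activation callables used elsewhere in these examples; the layer sizes are illustrative:

# Illustrative architecture dict with the keys train_evaluate reads:
# INTERNAL_LAYERS, ACT_FUNCS and LR. One activation per layer is assumed
# (two hidden layers plus the output layer here).
architecture = {
    "INTERNAL_LAYERS": [10, 5],
    "ACT_FUNCS": [tanh, tanh, sigmoid],
    "LR": 0.1,
}
trained_network = train_evaluate(architecture, "iris")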
Example #5
    if args.architecture == "short":
        architecture = SHORT
    elif args.architecture == "long":
        architecture = LONG
    elif args.architecture == "big":
        architecture = BIG
    else:
        raise ValueError("Options: short, long, big")

    if args.dataset == "uci":
        path_dataset = "Data_for_UCI_named.csv"
    else:
        path_dataset = args.dataset + ".data"

    # import dataset
    dataset = import_data("../data/{}".format(path_dataset),
                          sep=args.sep, header=args.header)

    more_info = ""

    # oversampling and undersampling are mutually exclusive
    if args.oversample:
        dataset = oversample(dataset, label=args.labels)
        more_info = "(oversampled)"
    elif args.undersample:
        dataset = undersample(dataset, label=args.labels)
        more_info = "(undersampled)"

    labels, encoding = one_hot_encoding(dataset[args.labels])
    classes = list(encoding.keys())
    dataset = np.delete(dataset, args.labels, 0)

    encodings = list()
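
one_hot_encoding is used throughout these examples but never shown; a minimal sketch of the behavior the snippets rely on (a one-hot matrix plus a class-to-vector dict), with the orientation inferred from the labels.T calls above:

import numpy as np

def one_hot_encoding_sketch(label_row):
    # hypothetical stand-in: map each class in the label row to a one-hot
    # vector; labels has shape (n_samples, n_classes), so labels.T matches
    # the (n_classes, n_samples) orientation the trainers above expect
    classes = np.unique(label_row)
    eye = np.eye(len(classes))
    encoding = {a_class: eye[i] for i, a_class in enumerate(classes)}
    labels = np.array([encoding[a_class] for a_class in label_row])
    return labels, encoding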
Example #6
FIG_SIZE = (20, 20)
TRAIN_SIZE = 0.8
LR = 0.5

np.random.seed(123)
seed(123)


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    # Initialize neuron
    neuron = Neuron("On iris", 4, sigmoid, LR)

    # iris dataset: keep only the first 100 samples (setosa and versicolor)
    dataset = import_data("../../data/iris.data").T[0:100].T

    # relabel the two classes as 1.0 and 0.0 for binary classification
    dataset[4][0:50] = 1.0
    dataset[4][50:100] = 0.0

    train_set, test_set = split_set(dataset, TRAIN_SIZE)

    epochs = []
    accuracies = []
    recalls = []

    # train the neuron on each training sample
    for x1, x2, x3, x4, label in train_set.T.tolist():
        epochs.append(
            neuron.train([x1, x2, x3, x4], label)
        )
        prediction = []
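
The snippet cuts off here; a plausible continuation evaluating the trained neuron on the held-out split. Neuron.feed is an assumed method name (only Neuron.train appears above):

    # hypothetical continuation: collect predictions on the test split;
    # neuron.feed is an assumed API, not shown in the original snippet
    for x1, x2, x3, x4, label in test_set.T.tolist():
        prediction.append(neuron.feed([x1, x2, x3, x4]))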