def _train(eta_field, epoch_field):
    try:
        eta = float(eta_field.get())
        epoch_limit = int(epoch_field.get())

        with open('bulk_data.json', 'r') as json_file:
            data = json.load(json_file)

        args = {'bulk_data': data, 'weights': weights}
        # If eta or epoch_limit is zero, the Perceptron should fall back to its
        # default values; to get that behavior, the corresponding key must be
        # omitted from the initialization kwargs.
        if eta != 0:
            args['eta'] = eta
        if epoch_limit != 0:
            args['epoch_limit'] = epoch_limit

        try:
            trainer = Perceptron(**args)
            trainer.process()
            layout.weights = trainer.weights
            layout.has_been_trained = True
        except AttributeError as error:
            # showerror takes (title, message); keep the exception in the message
            messagebox.showerror('Error', 'Provided data not found! ({})'.format(error))

        # Drop the previously drawn decision line (the third line on the axes);
        # Axes.lines no longer supports pop() in recent Matplotlib.
        l = layout.ax.lines[2]
        wl = weakref.ref(l)
        l.remove()
        del l
        x = np.linspace(-5, 5, 100)
        for line in trainer.lines:
            y = (line[0] - (line[1] * x)) / line[2]
            lines = layout.ax.plot(x, y, color='blue')
            l = lines.pop()
            wl = weakref.ref(l)
            layout.canvas.draw()
            l.remove()
            del l
        y = (trainer.weights[0][0] -
             (trainer.weights[0][1] * x)) / trainer.weights[0][2]
        layout.ax.plot(x, y, color='blue')
        messagebox.showinfo(
            'Perceptron training has finished',
            'The solution was found in the epoch number {}'.format(
                trainer.current_epoch))
        refresh_button.config(state='normal')
        window_error(trainer.current_epoch, trainer.error_freq)
    except ValueError as error:
        messagebox.showerror(
            'Error',
            'Input values must be float (for eta) and integer (for epoch limit)! ({})'.format(error)
        )
Example no. 2
def train(instances, algorithm):
    p = None
    if algorithm == 'perceptron':
        print('starting perceptron training')
        p = Perceptron(max_max_index, args.online_learning_rate)
        p.train(instances, args.online_training_iterations)
        print('ending training')
    # elif algorithm == 'averaged_perceptron':
    #     print('starting averaged training')
    #     p = Perceptron(max_size, True, args.online_learning_rate)
    #     p.train(instances, args.online_training_iterations)
    #     print('ending training')

    return p
Example no. 3
def iris_train_two():
    seed(1763456)
    iris_perceptron_two = Perceptron(
        "Iris perceptron 2 targets",
        [randint(-5, 5),
         randint(-5, 5),
         randint(-5, 5),
         randint(-5, 5)], randint(-5, 5))

    data = load_iris()
    inputs = list(data.data[:100])
    targets = list(data.target[:100])

    train_perceptron(iris_perceptron_two, inputs, targets)
Example no. 4
File: svm.py Project: snirsh/IML
def all2():
    # error_svm = np.zeros((5, 500))
    # error_per = np.zeros((5, 500))
    error_svm = np.zeros((5, 1))
    error_per = np.zeros((5, 1))
    m_values = np.array([5, 10, 15, 25, 70])

    for midx, m in enumerate(m_values):

        for i in range(500):
            print(m)
            X = np.random.multivariate_normal(mean, cov, m)
            y = true_h(X)

            while (len(np.argwhere(y == 1)) == 0) or (len(
                    np.argwhere(y == -1))) == 0:
                X = np.random.multivariate_normal(mean, cov, m)
                y = true_h(X)

            test_set_X = np.random.multivariate_normal(mean, cov, 10000)
            test_set_y = true_h(test_set_X)

            svmclf.fit(X, y)

            X = np.hstack((X, np.ones((m, 1))))

            perclf = Perceptron(X, y)

            y_hat_svm = svmclf.predict(test_set_X)
            print(test_set_X)
            test_set_X = np.hstack((test_set_X, np.ones((10000, 1))))
            print(test_set_X)
            y_hat_per = perclf.predict(test_set_X)

            error_svm[midx] += calculate_error_rate(test_set_y, y_hat_svm)
            error_per[midx] += calculate_error_rate(test_set_y, y_hat_per)

    # mean_error_svm = np.mean(error_svm, axis=1 )
    # mean_error_per = np.mean(error_per, axis=1 )
    mean_error_svm = error_svm / 500
    mean_error_per = error_per / 500

    plt.plot(m_values, mean_error_svm, label='svm')
    plt.plot(m_values, mean_error_per, label='perceptron')
    plt.title('performance comparison: svm & perceptron')
    plt.xlabel('m samples')
    plt.ylabel('mean error rate')
    plt.legend()
    plt.savefig('q5')  # save before show(); otherwise the saved figure is blank
    plt.show()
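A minimal sketch of the calculate_error_rate helper this snippet assumes (its actual definition is not shown): the fraction of test labels the classifier gets wrong.

import numpy as np

def calculate_error_rate(y_true, y_pred):
    # Fraction of positions where prediction and truth disagree.
    return float(np.mean(np.asarray(y_true).ravel() != np.asarray(y_pred).ravel()))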
Example no. 5
def iris_train_three():
    seed(1763456)
    iris_perceptron_three = Perceptron(
        "Iris perceptron 3 targets",
        [randint(-5, 5),
         randint(-5, 5),
         randint(-5, 5),
         randint(-5, 5)], randint(-5, 5))

    data = load_iris()
    inputs = list(data.data)
    targets = list(data.target)

    train_perceptron(iris_perceptron_three, inputs, targets)
Example no. 6
class TestAND(unittest.TestCase):
    andPerceptron = Perceptron([-3, 2, 2])

    def test_1(self):
        self.assertEqual(self.andPerceptron.calculate(case1), 0)

    def test_2(self):
        self.assertEqual(self.andPerceptron.calculate(case2), 0)

    def test_3(self):
        self.assertEqual(self.andPerceptron.calculate(case3), 0)

    def test_4(self):
        self.assertEqual(self.andPerceptron.calculate(case4), 1)
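The gate tests in this and the following examples assume truth-table inputs case1..case4 and a step-activation Perceptron whose first weight is the bias; a minimal sketch consistent with the assertions (an assumption, since the class under test is not shown):

case1, case2, case3, case4 = (0, 0), (0, 1), (1, 0), (1, 1)

class Perceptron:
    def __init__(self, weights):
        self.weights = weights  # [bias, w1, w2]

    def calculate(self, inputs):
        # Unit step over bias + weighted sum.
        total = self.weights[0] + sum(w * x for w, x in zip(self.weights[1:], inputs))
        return 1 if total > 0 else 0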
Example no. 7
def main():
    data = DataManager.load_data("data/data_banknote_authentication.txt")
    # remove break lines
    data = np.array([item.strip("\n") for item in data])
    data = np.array([item.split(',') for item in data])
    data = data.astype(float)  # np.float was removed in recent NumPy

    # 0 for authentic and 1 for inauthentic
    df = pd.DataFrame(data)
    df[4] = df[4].astype(int)
    authentic = df[df[4] == 0]
    inauthentic = df[df[4] == 1]
    X = df.iloc[np.r_[0:200, 1100:1300], [0, 3]].values
    y = np.array([0 if x < 200 else 1 for x in range(400)])
    plt.scatter(X[:200, 0],
                X[:200, 1],
                color='red',
                marker='o',
                label='authentic')
    plt.scatter(X[200:400, 0],
                X[200:400, 1],
                color='blue',
                marker='x',
                label='inauthentic')
    plt.xlabel('variance of Wavelet Transformed image')
    plt.ylabel('entropy of image')
    plt.legend(loc='upper left')
    plt.show()

    ppn = Perceptron(eta=0.1, n_iter=10)
    ppn.fit(X, y)

    plot_decision_regions(X, y, classifier=ppn)
    plt.xlabel('variance of Wavelet Transformed image')
    plt.ylabel('entropy of image')
    plt.legend(loc='upper left')
    plt.show()

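    # Standardize each feature (z = (x - mean) / std) so that Adaline's
    # gradient descent converges reliably with a fixed learning rate.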
    X_std = np.copy(X)
    X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
    X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()

    ada = AdalineGD(n_iter=10, eta=0.01)
    ada.fit(X_std, y)
    plot_decision_regions(X_std, y, classifier=ada)
    plt.title('Adaline - gradient descent')
    plt.xlabel('variance of Wavelet Transformed image')
    plt.ylabel('entropy of image')
    plt.legend(loc='upper left')
    plt.show()
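The plot_decision_regions helper used above is not defined in this snippet; a common implementation in the style of Raschka's examples (an assumption, not necessarily this project's helper, and it expects X and y as numpy arrays) looks like:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

def plot_decision_regions(X, y, classifier, resolution=0.02):
    # One marker/color per class.
    markers = ('o', 'x', 's', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # Evaluate the classifier over a grid spanning the feature ranges.
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)

    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # Overlay the training samples, one scatter call per class label.
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(X[y == cl, 0], X[y == cl, 1], alpha=0.8,
                    c=colors[idx], marker=markers[idx], label=str(cl))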
Example no. 8
class TestNAND(unittest.TestCase):
    nandPerceptron = Perceptron([3, -2, -2])

    def test_1(self):
        self.assertEqual(self.nandPerceptron.calculate(case1), 1)

    def test_2(self):
        self.assertEqual(self.nandPerceptron.calculate(case2), 1)

    def test_3(self):
        self.assertEqual(self.nandPerceptron.calculate(case3), 1)

    def test_4(self):
        self.assertEqual(self.nandPerceptron.calculate(case4), 0)
Example no. 9
class TestOR(unittest.TestCase):
    orPerceptron = Perceptron([0, 1, 1])

    def test_1(self):
        self.assertEqual(self.orPerceptron.calculate(case1), 0)

    def test_2(self):
        self.assertEqual(self.orPerceptron.calculate(case2), 1)

    def test_3(self):
        self.assertEqual(self.orPerceptron.calculate(case3), 1)

    def test_4(self):
        self.assertEqual(self.orPerceptron.calculate(case4), 1)
Example no. 10
def detect_values_greater_than_five_test():
    the_perceptron = Perceptron()
    the_perceptron.train([
        [5, -1],
        [2, -1],
        [0, -1],
        [-2, -1],
    ], [1, 0, 0, 0])

    nt.assert_equal(the_perceptron.predict([8]), 1)
    nt.assert_equal(the_perceptron.predict([5]), 1)
    nt.assert_equal(the_perceptron.predict([2]), 0)
    nt.assert_equal(the_perceptron.predict([0]), 0)
    nt.assert_equal(the_perceptron.predict([-2]), 0)
Example no. 11
def test_weight_initialization():
    input_dimensions = 2
    number_of_classes = 5
    model = Perceptron(input_dimensions=input_dimensions, number_of_classes=number_of_classes, seed=1)
    assert model.weights.ndim == 2 and \
           model.weights.shape[0] == number_of_classes and \
           model.weights.shape[1] == input_dimensions + 1
    weights = np.array([[1.62434536, -0.61175641, -0.52817175],
                        [-1.07296862, 0.86540763, -2.3015387],
                        [1.74481176, -0.7612069, 0.3190391],
                        [-0.24937038, 1.46210794, -2.06014071],
                        [-0.3224172, -0.38405435, 1.13376944]])
    np.testing.assert_allclose(model.weights, weights, rtol=1e-3, atol=1e-3)
    model.initialize_all_weights_to_zeros()
    assert np.array_equal(model.weights, np.zeros((number_of_classes, input_dimensions + 1)))
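A plausible sketch of the method the test exercises (attribute names are assumptions; the graded class is not shown): weights form a (number_of_classes, input_dimensions + 1) matrix, the extra column being the bias.

def initialize_all_weights_to_zeros(self):
    # Reset weights to a zero matrix of shape (classes, inputs + 1).
    self.weights = np.zeros((self.number_of_classes, self.input_dimensions + 1))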
Example no. 12
def main():
    imageList = loadImages(sys.argv[1])
    numOfFeatures = 50
    numOfPixels = 4
    numOfRows = len(imageList[0].getData[0])
    numOfCols = len(imageList[0].getData[0][0])
    featureList = constructRandomFeatures(numOfRows, numOfCols, numOfFeatures,
                                          numOfPixels)

    createInputs(imageList, featureList, numOfPixels)
    perceptron = Perceptron(numOfFeatures)
    k = 150
    trainingDelta = 0.1
    trainPerceptron(perceptron, imageList, k, trainingDelta)
Example no. 13
def step2_learning():
    ppn = Perceptron(eta=0.1)
    data = step1_get_data()
    X = data[0]
    y = data[1]
    # Train the model.
    ppn.fit(X, y)
    print(ppn.errors_)
    print(ppn.w_)
    # Save the trained (fitted) object to a file.
    with open('./3.IrisPerceptron/perceptron.dat', 'wb') as fp:
        pickle.dump(ppn, fp)
    print("학습 완료")
Example no. 14
    def __init__(self, weight_matrix):

        super(Model, self).__init__()

        # FIXME: hardcoded sizes need to be generalized
        self.summary = SentenceEmbedding(50, 10, weight_matrix)
        self.mlp = Perceptron(41).to(device)

        # Parameter matrix
        self.M = nn.Parameter(torch.Tensor(20, 20).to(device))

        init_weights(self)

Example no. 15
def test_set_and_get_weights():
    input_dimensions = 4
    number_of_nodes = 9
    model = Perceptron(input_dimensions=input_dimensions, number_of_nodes=number_of_nodes)
    weights = model.get_weights()
    assert weights.ndim == 2 and \
           weights.shape[0] == number_of_nodes and \
           weights.shape[1] == (input_dimensions + 1)
    model.set_weights(np.ones((number_of_nodes, input_dimensions + 1)))
    weights = model.get_weights()
    assert weights.ndim == 2 and \
           weights.shape[0] == number_of_nodes and \
           weights.shape[1] == (input_dimensions + 1)
    assert np.array_equal(model.get_weights(), np.ones((number_of_nodes, input_dimensions + 1)))
Example no. 16
def test_multiclass():

    training_data = [
        ((0.5, 1.4), 0, 0),
        ((0.7, 1.8), 0, 0),
        ((0.8, 1.6), 0, 0),
        ((1.5, 0.8), 0, 1),
        ((2.0, 1.0), 0, 1),
        ((0.3, 0.5), 1, 0),
        ((0.0, 0.2), 1, 0),
        ((-0.3, 0.8), 1, 0),
        ((-0.5, -1.5), 1, 1),
        ((-1.5, -2.2), 1, 1),
    ]

    plot_values = [
        ['ro', 'bo'],
        ['rs', 'bs'],
    ]

    p1 = Perceptron(data=training_data)
    p2 = Perceptron(data=training_data, y=2)
    p1.training()
    p2.training()

    for data in training_data:
        print(data)
        plt.plot([data[0][0]], [data[0][1]], plot_values[data[1]][data[2]])
    for data in training_data:
        assert (Perceptron.vector_product(data[0], p1.weights, p1.bias) >
                p1.threshold) == data[1]
        assert (Perceptron.vector_product(data[0], p2.weights, p2.bias) >
                p2.threshold) == data[2]
    plt.plot(p1.x, p1.y)
    plt.plot(p2.x, p2.y)
    plt.axis([-5, 6, -5, 6])
    plt.show()
Example no. 17
def main(data):

    # Normalise the data
    training_data = normalise(data)

    # Create the perceptron
    p = Perceptron(len(data[0][0]))

    # Number of full iterations
    epochs = 0

    # Initialize mse so the loop condition can be evaluated on the first pass
    mse = 999

    while (abs(mse - LMSE) > 0.002):

        # Epoch cumulative error
        error = 0

        # For each set in the training_data
        for value in training_data:

            # Calculate the result
            output = p.result(value[0])

            # Calculate the error
            iter_error = value[1] - output

            # Add the squared error to the epoch error
            error += iter_error ** 2

            # Adjust the weights based on inputs and the error
            p.weight_adjustment(value[0], iter_error)

        # Calculate the MSE - epoch error / number of sets
        mse = float(error / len(training_data))

        # Print the MSE for each epoch
        print "The MSE of %d epochs is %.10f" % (epochs, mse)

        # Every 100 epochs show the weight values
        if epochs % 100 == 0:
            print "0: %.10f - 1: %.10f - 2: %.10f - 3: %.10f" % (
                p.w[0], p.w[1], p.w[2], p.w[3])

        # Increment the epoch number
        epochs += 1

    return p
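For reference, the quantity the loop tracks per epoch is the mean squared error; with numpy arrays of desired and actual outputs (the values below are hypothetical examples) it is one line:

import numpy as np

targets = np.array([1.0, 0.0, 1.0])  # desired outputs
outputs = np.array([0.8, 0.1, 0.7])  # perceptron outputs
mse = float(np.mean((targets - outputs) ** 2))  # -> approximately 0.0467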
Example no. 18
def step2_learning():
    ppn = Perceptron(eta=0.1)
    # print(ppn)
    data = step1_get_data()
    X = data[0]  # petal length and width
    y = data[1]  # species
    # Train the model
    ppn.fit(X, y)
    print(ppn.errors_)
    print(ppn.w_)
    # Save the trained object to a file.
    with open('./perceptron.dat', 'wb') as fp:
        pickle.dump(ppn, fp)
    print('Training complete')
Example no. 19
def kfold_validation(xtrain, ytrain, foldnum, epochnum):
    kfold = KFold(n_splits=foldnum)
    mistake = []
    for epoch in epochnum:
        total = 0
        for trainIndex, testIndex in kfold.split(xtrain):
            xTrain_k, xTest_k = xtrain[trainIndex], xtrain[testIndex]
            yTrain_k, yTest_k = ytrain[trainIndex], ytrain[testIndex]
            model = Perceptron(epoch)
            trainStats = model.train(xTrain_k, yTrain_k)
            yHat = model.predict(xTest_k)
            total += calc_mistakes(yHat, yTest_k)
        average_mistakes = total / foldnum
        mistake.append(average_mistakes)
    return mistake
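Hypothetical usage: sweep several epoch counts and keep the one with the fewest average mistakes across folds (epoch_candidates is illustrative; xtrain and ytrain are assumed numpy arrays).

import numpy as np

epoch_candidates = [1, 5, 10, 25]
mistakes = kfold_validation(xtrain, ytrain, foldnum=5, epochnum=epoch_candidates)
best_epoch = epoch_candidates[int(np.argmin(mistakes))]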
Example no. 20
    def __init__(self,
                 is_leaky_relu: bool = False,
                 leakage_coeff: float = 0.2):
        self._relu = activation.ReLU(
            is_leaky=is_leaky_relu,
            leakage=leakage_coeff if is_leaky_relu else 0)
        self._sigmoid = activation.Sigmoid()
        self._identity = activation.Identity()

        self._network = [
            [Perceptron(num_inputs, self._relu)
             for _ in range(hiddenlayer1_size)],
            [Perceptron(hiddenlayer1_size, self._relu)
             for _ in range(hiddenlayer2_size)],
            [Perceptron(hiddenlayer2_size, self._sigmoid)
             for _ in range(num_outputs)],
        ]
        self._mn_data = MNIST('./images')
        self._desired_changes = None
        self._layer_inputs = None
Example no. 21
def main():
    inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]

    outputs = [[0], [0], [0], [1]]

    perceptron = Perceptron(2, 1, 0.01, 500)

    # perceptron.load_weight()
    perceptron.train(inputs, outputs)

    for possible_input in inputs:
        possible_input.append(1)
        print(perceptron.predict(possible_input, 0))

    perceptron.save_weight()
Example no. 22
    def __init__(self):
        """
		Crea una red neuronal para calcular XOR.
		Los perceptrones se crean con una tasa de aprendizaje de 0.3, umbral
		de error 0 y función de activación Paso unitario. Los tres perceptrones
		se entrenan completamente para su operación lógica dada.
		"""
        self.f = lambda x: 0 if x < 0 else 1
        self.tabla = list(map(list, product(*([[0, 1]] * 2))))
        self.__inputs = 2
        # h1 is trained to compute OR.
        self.h1 = Perceptron(2, self.f, 0.3, 0)
        print('Training perceptron h1 (OR).\n')
        self.h1.entrenamiento(self.tabla, [0, 1, 1, 1], 200)
        # h2 is trained to compute NAND.
        self.h2 = Perceptron(2, self.f, 0.3, 0)
        print('\nTraining perceptron h2 (NAND).\n')
        self.h2.entrenamiento(self.tabla, [1, 1, 1, 0], 200)
        # o computes the network output: an AND of h1 and h2.
        self.o = Perceptron(2, self.f, 0.3, 0)
        print('\nTraining perceptron o (AND).\n')
        self.o.entrenamiento(self.tabla, [0, 0, 0, 1], 200)
        self.__salidas = [0, 0, 0]
        print('\n')
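    # Once trained, the network computes XOR(a, b) = AND(OR(a, b), NAND(a, b)).
    # A sketch of the forward pass, assuming a hypothetical salida(entradas)
    # output method on Perceptron (the class's real output method is not shown):
    def xor(self, a, b):
        return self.o.salida([self.h1.salida([a, b]), self.h2.salida([a, b])])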
Example no. 23
def train_an_OR_function_test():

    the_perceptron = Perceptron()

    the_perceptron.train([
        [1, 1],
        [0, 1],
        [1, 0],
        [0, 0],
    ], [1, 1, 1, 0])

    nt.assert_equal(the_perceptron.predict([1, 1]), 1)
    nt.assert_equal(the_perceptron.predict([1, 0]), 1)
    nt.assert_equal(the_perceptron.predict([0, 1]), 1)
    nt.assert_equal(the_perceptron.predict([0, 0]), 0)
Example no. 24
    def test_logical_and(self):
        target_values = np.array([0, 0, 0, 1])
        source_inputs = np.array(([0, 0], [0, 1], [1, 0], [1, 1]))

        perceptron = Perceptron(2, binary_step)
        perceptron.train(source_inputs, target_values)

        output = perceptron.predict([1, 1])
        self.assertEqual(output, 1 & 1)
        output = perceptron.predict([0, 1])
        self.assertEqual(output, 0 & 1)
        output = perceptron.predict([1, 0])
        self.assertEqual(output, 1 & 0)
        output = perceptron.predict([0, 0])
        self.assertEqual(output, 0 & 0)
Example no. 25
def test_error_calculation():
    input_dimensions = 2
    number_of_classes = 2
    model = Perceptron(input_dimensions=input_dimensions, number_of_classes=number_of_classes, seed=1)
    X_train = np.array([[-1.43815556, 0.10089809, -1.25432937, 1.48410426],
                        [-1.81784194, 0.42935033, -1.2806198, 0.06527391]])
    Y_train = np.array([[1, 0, 0, 1], [0, 1, 1, 0]])
    model.initialize_all_weights_to_zeros()
    error = []
    for k in range(20):
        model.train(X_train, Y_train, num_epochs=1, alpha=0.0001)
        percent_error = model.calculate_percent_error(X_train, Y_train)
        print(percent_error)
        error.append(percent_error)
    np.testing.assert_allclose(error,
                               [0.25, 0.5, 0.5, 0.25, 0.25, 0.25, 0.5, 0.25, 0.25, 0.25, 0.25, 0.0, 0.0, 0.0, 0.0, 0.0,
                                0.0, 0.0, 0.0, 0.0], rtol=1e-3, atol=1e-3)
Example no. 26
def step1_learning():
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([-1, -1, -1, 1])

    ppn = Perceptron(eta=0.1)
    stime = time()
    ppn.fit(X, y)
    etime = time()

    print('Training time:', (etime - stime))
    print('Number of errors during training:', ppn.errors_)

    # Save the fitted object to a file
    with open('perceptron.dat', 'wb') as fp:
        pickle.dump(ppn, fp)
    print('Machine learning training complete')
Example no. 27
def main():
    df = pd.read_csv('/Users/yangchong/PycharmProjects/iris.data', header=None)
    print(df.tail())
    y = df.iloc[0:100, 4].values
    y = np.where(y == 'Iris-setosa', -1, 1)
    print(y)
    X = df.iloc[0:100, [0, 2]].values
    print(X)
    '''
    plt.scatter(X[:50, 0], X[:50, 1], color='blue', marker='o', label='setosa')
    plt.scatter(X[50:, 0], X[50:100, 1], color='red', label='versicolor', marker='x')
    plt.xlabel('petal length')
    plt.ylabel('sepal length')
    plt.legend(loc ='upper left')
    plt.show()
    '''
    ppn = Perceptron(eta=0.01, n_iter=10)
    ppn.fit(X, y)
    '''
    plt.plot(range(1, len(ppn.errors_)+1), ppn.errors_, marker='o')
    plt.xlabel('Epochs')
    plt.ylabel('Number of misclassifications')
    plt.show()
    
    plot_decision_regions(X, y, classifier=ppn)
    plt.xlabel('sepal length [cm]')
    plt.ylabel('petal length [cm]')
    plt.legend(loc='upper left')
    plt.show()
    
    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))  # draw a 1x2 grid of subplots
    ada1 = AdlineGD(eta=0.0001, n_iter=10).fit(X, y)
    ax[0].plot(range(1, len(ada1.cost_)+1), np.log10(ada1.cost_), marker='o')
    ax[0].set_xlabel('Epochs')
    ax[0].set_ylabel('log(Sum-squared-error)')
    ax[0].set_title('Adaline - learning rate 0.0001')
    plt.show()
    '''
    X_std = np.copy(X)  # standardization: (x - mean) / std; standardized data has mean 0 and variance 1
    X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
    X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()

    print('xx', X_std)

    arr = X_std.ravel()
    print(arr)
    print(arr[10])
Example no. 28
def training():
    inputs = training_set[:, :3]
    outputs = training_set[:, 3:]
    learning_rate = 0.5
    has_changed = True

    perceptron = Perceptron(activation_function)

    while has_changed:
        has_changed = False
        for i in range(len(inputs)):
            perceptron.inputs = inputs[i]
            if perceptron.calculate_output() != outputs[i]:
                train_step(inputs[i], perceptron, outputs[i], learning_rate)
                has_changed = True

    # Plot the learned decision boundary: w0 + w1*x + w2*y = 0.
    x = np.linspace(0, 1, 100)
    y = -(perceptron.weights[0] + perceptron.weights[1] * x) / perceptron.weights[2]
    plt.plot(x, y)
    plt.show()

    return perceptron.weights


training()
# good_weights = training()
#
#
# test_set = np.array([
#     np.array([1, 0.334, 1.56]),
#     np.array([1, 0.22, 0.56]),
#     np.array([1, 1.74, 1.66]),
#     np.array([1, 3.44, 7.99])
# ])
#
#
# def test():
#     perceptron = Perceptron(activation_function, weights=good_weights)
#     for i in range(len(test_set)):
#         perceptron.inputs = test_set[i]
#         output = perceptron.calculate_output()
#         print(test_set[i])
#         print(output)
#
#
# test()
Example no. 29
def main():
    # Load data
    if len(sys.argv) == 4:
        features_path = sys.argv[1]
        path_target = sys.argv[2]
        test_path = sys.argv[3]
    else:
        features_path = r'train_mx.txt'
        path_target = r'train_my.txt'
        test_path = r'test_mx.txt'

    # Get examples and classes from the input paths.
    data = np.loadtxt(features_path, dtype=float, usecols=(1, 2, 3, 4, 5, 6, 7), delimiter=',')
    target = np.genfromtxt(path_target, dtype=np.int8)
    test = np.loadtxt(test_path, dtype=float, usecols=(1, 2, 3, 4, 5, 6, 7), delimiter=',')

    # Map the 'sex' feature as dummies.
    mapped_dummies_features = map_dummies(features_path)
    mapped_dummies_test = map_dummies(test_path)
    data = np.append(data, mapped_dummies_features, axis=1)
    test = np.append(test, mapped_dummies_test, axis=1)

    # Stack targets and features.
    target = target[:, np.newaxis]
    all_data = np.c_[(data, target)]

    # Data scale
    # all_data = min_max_scaler(all_data)

    result = []
    # Training model with cross-validation and prediction.
    pr = Perceptron(eta=0.1, n_iter=100, initialize_w=False)
    validation_data = all_data
    _ = cross_val(pr, validation_data)
    result.append(pr.predict(test))

    svm = SVM(learning_rate=0.1, n_iter=1800, batch_size=15, reg=0.0001)
    validation_data = all_data
    _ = cross_val(svm, validation_data)
    result.append(svm.predict(test))

    pac = PAC(n_iter=200)
    validation_data = all_data
    _ = cross_val(pac, validation_data)
    result.append(pac.predict(test))

    print_predict(np.transpose(result).astype(int))
Example no. 30
    def test_trains_for_logical_and(self):
        labels = np.array([1, 0, 0, 0])
        input_matrix = []
        input_matrix.append(np.array([1, 1]))
        input_matrix.append(np.array([1, 0]))
        input_matrix.append(np.array([0, 1]))
        input_matrix.append(np.array([0, 0]))

        perceptron = Perceptron(2, threshold=10, learning_rate=1)
        perceptron.train(input_matrix, labels)

        a = 1
        b = 1
        inputs = np.array([a, b])

        output = perceptron.predict(inputs)
        self.assertEqual(output, a & b)