Example #1
 def test_or(self):
     weights_or = [1, 1]
     OR = Perceptron(weights_or, 0)
     self.assertEqual(OR.resolve([1, 1]), 1)
     self.assertEqual(OR.resolve([1, 0]), 1)
     self.assertEqual(OR.resolve([0, 0]), 0)
     self.assertEqual(OR.resolve([0, 1]), 1)
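These gate tests (see also Examples #7 and #17) assume a simple threshold unit. A minimal sketch of the Perceptron class they exercise, assuming resolve() fires only when the weighted sum plus bias is strictly positive (which matches all three truth tables):

class Perceptron:
    def __init__(self, weights, bias):
        self.weights = weights
        self.bias = bias

    def resolve(self, inputs):
        # Weighted sum plus bias; output 1 only when strictly positive.
        total = sum(w * x for w, x in zip(self.weights, inputs)) + self.bias
        return 1 if total > 0 else 0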
Example #2
def test_vary_m():
    storage = []
    # Range of m values from 10 to 5000 in increments of 100
    for m in tqdm(range(10, 5000, 100)):
        steps = []
        weights = []
        biases = []
        # Looping 20 times and taking average number of steps to get a better estimate
        for i in range(0, 20):
            # Get perceptron data with e = 0.1, m as a variable and k as 5
            data = gen.gen_perceptron_data(0.1, m, 5)
            pt = Perceptron()
            steps.append(pt.fit_perceptron(data))
            # Normalizing all the weights to make them uniform
            den = np.sqrt(
                np.square(np.linalg.norm(pt.weights)) + np.square(pt.bias_val))
            pt.weights = pt.weights / den
            pt.bias_val = pt.bias_val / den
            # Get weights of perceptron and store them
            weights.append(pt.weights)
            # Get biases of perceptron and store them
            biases.append(pt.bias_val)
        # Calculating average weights using np.mean and axis as 0
        avg_w = np.mean(weights, axis=0)
        # Calculating bias the same way
        avg_b = np.mean(biases, axis=0)
        # Getting the distance from "ideal" perceptron
        dist = get_dist_from_ideal(weights=avg_w, bias=avg_b)
        # Store the values computed for this m
        storage.append(str(m) + "," + str(np.average(steps)) + "," + str(dist))
    return storage
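get_dist_from_ideal is not shown; a plausible sketch, assuming the data generator's ideal separator is exposed as IDEAL_W and IDEAL_B (hypothetical names, normalized the same way as the fitted weights):

import numpy as np

IDEAL_W = np.ones(5) / np.sqrt(5)  # hypothetical ideal weights for k = 5
IDEAL_B = 0.0                      # hypothetical ideal bias

def get_dist_from_ideal(weights, bias):
    # Euclidean distance between the averaged (weights, bias) vector
    # and the ideal separator's (weights, bias) vector.
    return float(np.linalg.norm(np.append(np.asarray(weights) - IDEAL_W,
                                          bias - IDEAL_B)))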
Example #3
 def run(self):
     TrainImages, TrainAnswers, TestImages, TestAnswers = self.getImageSets(
     )
     # build default perceptron
     Percep = Perceptron(self.imageSize**2, self.numBiasNodes,
                         self.numOuputNodes, self.learningRate)
     Percep.init()
     Tester = Test(TestImages, TestAnswers, Percep)
     trainTester = Test(TrainImages, TrainAnswers, Percep)
     dimension = (self.imageSize * self.imageSize +
                  self.numBiasNodes) * self.numOuputNodes
     trainer = Train(TrainImages, TrainAnswers, self.learningRate,
                     self.numPSOIterations, self.PSOSeedRadius,
                     self.psoSeedVelocity)
     self.testResults = []
     self.runTimes = []
     startTime = time.process_time()
      for _ in range(self.epochs):
          Percep, TestResults = self.epoch(Percep, dimension, trainTester,
                                           trainer, Tester)
          self.testResults += [TestResults]
          currentTime = time.process_time() - startTime
          self.runTimes += [currentTime]
     return self.testResults, self.runTimes
Example #4
def main():
    print("Perceptron Test File")
    # https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data
    df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
    print(df.tail())
    y = df.iloc[0:100, 4].values
    y = np.where(y == 'Iris-setosa', -1, 1)
    X = df.iloc[0:100, [0, 2]].values
    print(y)
    print(X)
    plt.scatter(X[:50,0],X[:50,1],color='red',marker='o',label="setosa")
    plt.scatter(X[50:100,0],X[50:100,1],color='blue',marker='x',label='versicolor')
    plt.xlabel('sepal length')
    plt.ylabel('petal length')
    plt.legend(loc='upper left')
    #plt.show()
    ppn = Perceptron(eta=0.1,n_iter=10)
    ppn.fit(X,y)
    plt.figure()
    plt.plot(range(1,len(ppn.errors_)+1),ppn.errors_,marker='o')
    #plt.show()
    plot_decision_reg(X,y,classifier=ppn)
    plt.xlabel('sepal length [cm]')
    plt.ylabel('petal length [cm]')
    plt.legend(loc='upper left')
    plt.show()
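plot_decision_reg is not defined in this snippet; a standard meshgrid sketch (assuming a two-feature classifier with a predict() method, in the style of Raschka's book):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

def plot_decision_reg(X, y, classifier, resolution=0.02):
    markers = ('o', 'x')
    colors = ('red', 'blue')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # Evaluate the classifier on a grid spanning both features
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    plt.contourf(xx1, xx2, Z.reshape(xx1.shape), alpha=0.3, cmap=cmap)
    # Overlay the training points, one marker per class
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(X[y == cl, 0], X[y == cl, 1],
                    color=colors[idx], marker=markers[idx], label=cl)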
Example #5
def test_vary_e():
    storage = []
    # Range of e values from 0.90 to 1.00 in increments of 0.02 (the integer loop values are scaled by 100 below)
    for e in tqdm(range(90, 101, 2)):
        steps = []
        weights = []
        biases = []
        e = e / 100
        # Looping 80 times and taking the average number of steps to get a better estimate
        for i in range(0, 80):
            # Get perceptron data with e as a variable, m as 100 and k = 5
            data = gen.gen_perceptron_data(e, 100, k=5)
            pt = Perceptron()
            steps.append(pt.fit_perceptron(data))
            den = np.sqrt(
                np.square(np.linalg.norm(pt.weights)) + np.square(pt.bias_val))
            pt.weights = pt.weights / den
            pt.bias_val = pt.bias_val / den
            # Get weights and biases of perceptron and store them
            weights.append(pt.weights)
            biases.append(pt.bias_val)
        # Calculating average weights and biases using np.mean and axis as 0
        avg_w = np.mean(weights, axis=0)
        avg_b = np.mean(biases, axis=0)
        # Getting the distance from "ideal" perceptron
        dist = get_dist_from_ideal(weights=avg_w, bias=avg_b)
        # Store the values computed for this e
        storage.append(str(e) + "," + str(np.average(steps)) + "," + str(dist))
    return storage
Example #6
 def testOR(self):
      print('Running OR Test Case:')
     perc = Perceptron([[0,0,-1], [0,1,1], [1,0,1], [1,1,1]],2, True)
     self.assertEqual(perc.classify([0,0]), -1)
     self.assertEqual(perc.classify([1,0]), 1)
     self.assertEqual(perc.classify([0,1]), 1)
     self.assertEqual(perc.classify([1,1]), 1)
Example #7
 def test_and(self):
     weight_and = [1, 1]
     AND = Perceptron(weight_and, -1)
     self.assertEqual(AND.resolve([1, 1]), 1)
     self.assertEqual(AND.resolve([1, 0]), 0)
     self.assertEqual(AND.resolve([0, 0]), 0)
     self.assertEqual(AND.resolve([0, 1]), 0)
Example #8
def test_vary_k():
    storage = []
    # Only a single value, k = 100, is tested here
    for k in tqdm([100]):
        steps = []
        weights = []
        biases = []
        # Looping 50 times and taking average number of steps to get a better estimate
        for i in range(0, 50):
            # Get perceptron data with e = 0.05, m as 100 and k as a variable
            data = gen.gen_perceptron_data(0.05, 100, k)
            pt = Perceptron()
            steps.append(pt.fit_perceptron(data))
            den = np.sqrt(
                np.square(np.linalg.norm(pt.weights)) + np.square(pt.bias_val))
            pt.weights = pt.weights / den
            pt.bias_val = pt.bias_val / den
            # Get weights of perceptron and store them
            weights.append(pt.weights)
            # Get biases of perceptron and store them
            biases.append(pt.bias_val)
        # Calculating average weights and biases using np.mean and axis as 0
        avg_w = np.mean(weights, axis=0)
        avg_b = np.mean(biases, axis=0)
        # Getting the distance from "ideal" perceptron
        dist = get_dist_from_ideal(weights=avg_w, bias=avg_b)
        # Store the values computed for this k
        storage.append(str(k) + "," + str(np.average(steps)) + "," + str(dist))
    return storage
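A sketch of how the three vary-* experiments above might be driven, assuming each returned row is already a "param,avg_steps,dist" string (the filenames are illustrative):

def write_results(rows, filename):
    with open(filename, 'w') as f:
        f.write("param,avg_steps,dist\n")
        f.write("\n".join(rows) + "\n")

# write_results(test_vary_m(), "vary_m.csv")
# write_results(test_vary_e(), "vary_e.csv")
# write_results(test_vary_k(), "vary_k.csv")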
Example #9
    def __init__(self, Nvars=2, Nperceptrons=2, Nneurons=[2, 1]):
        self.Nperceptrons = Nperceptrons
        self.FFperceptrons = []
        self.inputDcDz = []
        """
        ###################
        Feedforward network
        ###################
        """
        ### First layer ###
        self.FFperceptrons.append(Perceptron(Nneurons[0], Nvars))

        ### Intermediate layers ###
        self.FFperceptrons.extend(
            Perceptron(Nneurons[j], self.FFperceptrons[j - 1].Nneurons)
            for j in range(1, self.Nperceptrons - 1))

        ### Last layer ###
        self.FFperceptrons.append(
            Perceptron(Nneurons[self.Nperceptrons - 1],
                       self.FFperceptrons[self.Nperceptrons - 2].Nneurons))
        """
        #######################
        Backpropagation network
        #######################
        """
        self.initBPnetwork()
Example #10
def part_3():
    sep_dataset, meta = arff.loadarff("linearlySeperable.arff")
    non_sep_dataset, meta = arff.loadarff("linearlyUnseperable.arff")
    sep_dataset = sep_dataset.tolist()
    non_sep_dataset = non_sep_dataset.tolist()
    trainingsets = [
        np.array(sep_dataset, dtype=np.float64),
        np.array(non_sep_dataset, dtype=np.float64),
        np.array(non_sep_dataset + sep_dataset, dtype=np.float64)
    ]
    titles = [
        "Linearly Separable Learning Rate Performance",
        "Linearly Inseparable Learning Rate Performance",
        "Mixed lin. sep/insep data"
    ]

    learning_rates = [.0000001, .001, .1, .2, .5, .8, .99, .99999]
    m = np.zeros((len(learning_rates), 3))

    for j, title in enumerate(titles):
        for i, rate in enumerate(learning_rates):
            P = Perceptron(rate, 2)
            accuracy, epochs = P.train(trainingsets[j], trainingsets[j])
            m[i, :] = np.array([rate, accuracy[-1], epochs])

        collabel = ["Learning Rate", "Accuracy", "Epochs"]
        rowlabel = ["" for x in range(len(learning_rates))]
        show_table(collabel, rowlabel, m, title)
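show_table is assumed rather than shown; a minimal matplotlib sketch that renders the results matrix as a figure-level table:

import numpy as np
import matplotlib.pyplot as plt

def show_table(collabel, rowlabel, data, title):
    fig, ax = plt.subplots()
    ax.axis('off')
    ax.set_title(title)
    ax.table(cellText=np.round(data, 4), colLabels=collabel,
             rowLabels=rowlabel, loc='center')
    plt.show()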
Example #11
class Grafics:
    def __init__(self):

        self.randomweights = [uniform(-2, 2), uniform(-2, 2)]
        self.bias = random()
        self.perceptron = Perceptron(pesos=self.randomweights, b=self.bias)
        self.randominputsx = []
        self.randominputsy = []
        self.perceptronanswers = []
        self.up = []

        # Training the Perceptron
        for i in range(0, 1000):
            x = uniform(0, 100)
            y = uniform(0, 100)
            self.perceptron.training([x, y], upp([x, y]))

    def plotear(self):
        # Generating random set
        for i in range(0, 200):
            self.randominputsx.append(uniform(0, 100))
            self.randominputsy.append(uniform(0, 500))
            self.perceptronanswers.append(
                self.perceptron.feed(
                    [self.randominputsx[i], self.randominputsy[i]]))

        plt.scatter(self.randominputsx,
                    self.randominputsy,
                    c=self.perceptronanswers)
        x = range(0, 101)
        y = []
        for i in range(0, 101):
            y.append(fun(x[i]))
        plt.plot(x, y)
        plt.show()
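The helpers fun and upp are assumed: fun defines the target line and upp labels a point 1 when it lies above that line. A sketch (the specific line is hypothetical):

def fun(x):
    # Hypothetical target line the perceptron should learn.
    return 0.5 * x + 10

def upp(point):
    # 1 if the point lies above the line, 0 otherwise.
    x, y = point
    return 1 if y > fun(x) else 0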
Example #12
def compare_l_rate(filename, data):
    """
    Calculate loss for different learning rates, same batch size
    sort calculated values by loss and write to file. Also plot
    the Learning rate vs Loss graph.
    """
    l_rate_performance = []
    for l_rate in np.arange(0.001, 0.1, 0.001):
        model = Perceptron(l_rate=l_rate)
        model.fit(data)
        l_rate_performance.append([l_rate, model.test_set_errors[-1], len(model.test_set_errors)])

    sorted_by_loss = np.array(l_rate_performance)[np.array(l_rate_performance)[:, 1].argsort()]
    with open(filename, 'w') as f:
        f.write("Learning Rate    Final Loss    Iteration Count")
        for i in range(len(sorted_by_loss)):
            f.write("\n{:<17.3f}{:<14.2E}{:<19.1f}"
                    .format(sorted_by_loss[i, 0], sorted_by_loss[i, 1], sorted_by_loss[i, 2]))

    plt.figure()
    plt.plot(np.array(l_rate_performance)[:,0],np.array(l_rate_performance)[:,-2])
    plt.scatter(np.array(l_rate_performance)[:, 0], np.array(l_rate_performance)[:, -2], s=5, c='r')
    plt.title('Learning Rate Performance')
    plt.ylim(0, np.mean(np.array(l_rate_performance)[3:15, -2])*10)
    plt.xlabel('Learning Rate')
    plt.ylabel('Loss')
    plt.draw()
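Hypothetical usage, assuming data has already been loaded in whatever format Perceptron.fit() expects:

# data = load_dataset(...)           # hypothetical loader
compare_l_rate('l_rate_results.txt', data)
plt.show()                           # render the figure left pending by plt.draw()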
Example #13
def train_and_test(train, train_labels, test, test_labels):
    """
    Train the Perceptron NOF_ITERATIONS times on shuffled data and test it.
    :param train: train samples
    :param train_labels: train labels
    :param test: test samples
    :param test_labels: test labels
    :return: mean accuracy
    """

    acc_sum = 0  # avoid shadowing the built-in sum
    for iteration in range(0, NOF_ITERATIONS):
        n = train.shape[0]
        nof_features = train.shape[1]

        # Shuffle the data
        indices_shuffle = np.random.permutation(n)
        train_shuffle = train[indices_shuffle]
        train_labels_shuffle = train_labels[indices_shuffle]

        perceptron = Perceptron(nof_features)
        perceptron.train(train_shuffle, train_labels_shuffle)
        acc = perceptron.test(test, test_labels)
        acc_sum += acc

    mean_acc = acc_sum / NOF_ITERATIONS
    return mean_acc
Example #14
class DigitClassifier():
    def __init__(self):
        self.model = Perceptron()
        self.fit()

    def fit(self):
        mnist = tf.keras.datasets.mnist
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        num_train = 15000

        # Train set
        mask = range(num_train)
        X_train = X_train[mask]
        y_train = y_train[mask]

        X_train = np.reshape(X_train, (X_train.shape[0], -1))

        # add one dimension (a bias column of ones)
        X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])

        loss_hist = self.model.train(X_train,
                                     y_train,
                                     learning_rate=1e-7,
                                     reg=2.5e4,
                                     num_iters=1500,
                                     verbose=True)

        # Loss Profile
        # plt.plot(loss_hist)
        # plt.xlabel('Iteration number')
        # plt.ylabel('Loss value')
        # plt.show()

    def predict(self, x):
        return self.model.predict(x)
Example #15
    def testPerceptronNetworkXOR(self):
        """
        This functions tests every possible input of the XOR perceptron network.
        """
        """ -----Create XOR network----- """
        """ Create First hidden layer """
        orPerceptron = Perceptron(weights=[1, 1], bias=-1)
        nandPerceptron = Perceptron(weights=[-1, -1], bias=1)
        firstLayer = PerceptronLayer(
            perceptrons=[orPerceptron, nandPerceptron])
        """ Create second hidden layer """
        andPerceptron = Perceptron(weights=[1, 1], bias=-2)
        secondLayer = PerceptronLayer(perceptrons=[andPerceptron])
        """ Create perceptron network """
        self.xorNetwork = PerceptronNetwork(layers=[firstLayer, secondLayer])
        """ ------Test every possible input----- """
        inputNetwork = [[[1, 1], 0], [[1, 0], 1], [[0, 1], 1], [[0, 0], 0]]

        for testInput in inputNetwork:
            """ Set the input for the perceptron network """
            self.xorNetwork.setInput(networkInput=testInput[0])
            """ Run perceptron network """
            self.xorNetwork.feedForward()
            """ Test of the output is correct """
            self.assertEqual(self.xorNetwork.output[0], testInput[1])
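A minimal sketch of the PerceptronLayer and PerceptronNetwork classes this test assumes (weights and bias attributes on each Perceptron are an assumption). Note that, unlike Example #1, these perceptrons must fire at a weighted sum of exactly zero (>= 0), otherwise the chosen weights and biases would not reproduce XOR:

class PerceptronLayer:
    def __init__(self, perceptrons):
        self.perceptrons = perceptrons

class PerceptronNetwork:
    def __init__(self, layers):
        self.layers = layers
        self.output = []

    def setInput(self, networkInput):
        self.networkInput = networkInput

    def feedForward(self):
        signal = self.networkInput
        for layer in self.layers:
            # Each layer's outputs become the next layer's inputs.
            signal = [1 if sum(w * x for w, x in zip(p.weights, signal)) + p.bias >= 0 else 0
                      for p in layer.perceptrons]
        self.output = signal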
Example #16
    def __init__(self, activation=None, random_state=None, learning_rate=None):
        self.activation = activation
        self.random_state = random_state
        self.learning_rate = learning_rate

        if self.activation == 'AdalineGD':
            from AdalineGD import AdalineGD
            print("Your activation is AdalineGD")
            self.classifier = AdalineGD(eta=self.learning_rate,
                                        random_state=self.random_state)
        elif self.activation == 'HyperTan':
            from HyperTan import HyperTan
            print("Your activation is HyperTan")
            self.classifier = HyperTan(eta=self.learning_rate,
                                       random_state=self.random_state)
        else:
            from Perceptron import Perceptron
            self.activation = 'Perceptron'
            print(
                "Your activation is defaulted to Perceptron. You can set the activation argument to 'AdalineGD' or 'HyperTan'"
            )
            self.classifier = Perceptron(eta=self.learning_rate,
                                         random_state=self.random_state)
Example #17
 def test_nand(self):
     weights_nand = [-1.0, -1.0]
     nand = Perceptron(weights_nand, 0.5)
     self.assertEqual(nand.resolve([1, 1]), 0)
     self.assertEqual(nand.resolve([1, 0]), 0)
     self.assertEqual(nand.resolve([0, 0]), 1)
     self.assertEqual(nand.resolve([0, 1]), 0)
Example #18
def perceptronDemo():
    m = np.matrix([[1, 2], [2, 4], [3, 6]]).T
    p = Perceptron()
    p.trainDynamicRate(m, .2, 0.01, 100)
    print(p)
    print("Total error: ")
    print(p.avgerror(m))
Example #19
def test_run(neuron_t,
             train_x,
             train_y,
             test_label,
             activation,
             lr,
             init='range',
             weight_range=None,
             run_num=1):
    epoch_nums = np.array([])
    print(test_label)
    for _ in range(run_num):
        if neuron_t == 'perceptron':
            neuron = Perceptron(2,
                                init=init,
                                weight_range=weight_range,
                                activation=activation)
        else:
            neuron = Adaline(2,
                             init=init,
                             weight_range=weight_range,
                             error_th=0.3)
        neuron.train(train_x, train_y, learning_rate=lr)
        epoch_nums = np.append(epoch_nums, neuron.epoch_num)
    average = np.average(epoch_nums)
    std_deviation = np.std(epoch_nums)  # population std, same as the manual formula

    print("AVG EPOCH NUM:", average)
    print("STD DEVIATION:", std_deviation)
Example #20
def learnPerceptrons():
    MULTI = False
    HEIGHT = 50
    WIDTH = 50
    ENTRANCES = HEIGHT * WIDTH
    xmlParser = XMLParser()
    examples = xmlParser.getAllExamples()
    print("Got examples")
    perceptrons = []
    for i in range(HEIGHT):
        row = []
        for j in range(WIDTH):
            perc = Perceptron(i, j, examples, ENTRANCES)
            row.append(perc)
        perceptrons.append(row)
    print("Created perceptrons")
    beforeTime = time.process_time()
    if MULTI:
        print("Created pool")
        percPool = PerceptronPool(perceptrons, examples)
        perceptrons = percPool.learnPerc()
    else:
        for rowIndex, row in enumerate(perceptrons):
            print("processing row %s" % rowIndex)
            for perc in row:
                perc.learn()
    learnTime = time.process_time() - beforeTime
    print("LearnTime : " + str(learnTime))

    xmlParser.setWeights(perceptrons)
Example #21
def no_training_data_supplied_test():
    # given
    the_perceptron = Perceptron()
    # when
    result = the_perceptron.predict([])
    # then
    nt.assert_equal(result, None, 'Should have no result with no training data.')
Example #22
def train_and_perceptron():
    # initialize a perceptron with two inputs and activation function f
    p = Perceptron(2, f)
    # train for 10 iterations with a learning rate of 0.1
    input_vecs, labels = get_training_dataset()
    p.train(input_vecs, labels, 10, 0.1)
    return p
Example #23
 def __init__(self, inputs, outputs, epoch):
     self.inputs = inputs
     self.outputs = outputs
     self.epoch = epoch
     # Create the perceptron
     self.randomweights = [uniform(-2, 2), uniform(-2, 2)]
     self.bias = random()
     self.perceptron = Perceptron(pesos=self.randomweights, b=self.bias)
Example #24
def test_bipolar_perceptron(parameters):
    parameters.activation_function = Perceptron.bipolar_function
    check_perceptron_parameters(parameters)
    perceptron = Perceptron(parameters)
    epochs = perceptron.learn(learning_vectors_bipolar)
    perceptron.test(testing_vectors_bipolar)

    return epochs
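Perceptron.bipolar_function is not shown; presumably a bipolar step activation mapping the net input to {-1, 1} rather than {0, 1}. A sketch:

def bipolar_function(net):
    # Bipolar step: +1 for non-negative net input, -1 otherwise.
    return 1 if net >= 0 else -1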
Example #25
 def __init__(self):
     self.models = [
         Perceptron(16 * 16, 1, nn.LeakyReLU()),
         Perceptron(16 * 16, 20, nn.Sigmoid()),
         Perceptron(16 * 16, 16 * 16, nn.Sigmoid())
     ]
     self.loss = [nn.MSELoss(), nn.MSELoss(), nn.MSELoss()]
     self.optimizer = self.init_optimizer(3)
Example #26
def runTraining():
    ptron = Perceptron(3, None)
    total = 0
    for i in range(0, TRAININGPOINTS):
        ptron.train(training[i].inputs, training[i].answer)
        for j in range(0, TRAININGPOINTS):
            guess = ptron.feedforeward(training[j].inputs)
        total = i
    return ptron
Example #27
def train_and_perception():
    '''
    Train the perceptron on the AND truth table.
    '''
    p = Perceptron(2, f)
    # print(p.weights, p.bias)
    input_vec, labels = get_training_dataset()
    p.train(input_vec, labels, 10, 0.1)
    return p
Example #28
class LearningCurve:
    def __init__(self):
        self.randominputsx = []
        self.randominputsy = []
        self.traininginputsx = []
        self.traininginputsy = []
        self.points = 10001

        self.randomweights = [uniform(-2, 2), uniform(-2, 2)]
        self.bias = random()
        self.perceptron = Perceptron(pesos=self.randomweights, b=self.bias)
        # Training points
        for i in range(0, self.points):
            x = uniform(0, 100)
            y = uniform(0, 100)
            self.traininginputsx.append(x)
            self.traininginputsy.append(y)
        # Testing points
        for i in range(0, self.points):
            self.randominputsx.append(uniform(0, 100))
            self.randominputsy.append(uniform(0, 500))

    def learningcurve(self):
        final = []
        # Trainings
        trainings = list(range(0, 10100, 100))

        for i in range(0, len(trainings)):
            # train the perceptron
            for j in range(0, trainings[i] + 1):
                self.perceptron.training(
                    [self.traininginputsx[j], self.traininginputsy[j]],
                    upp([self.traininginputsx[j], self.traininginputsy[j]]))
            perceptronanswers = []
            expected = []
            aciertos = 0
            # get the trained perceptron's answers
            for k in range(0, self.points):
                perceptronanswers.append(
                    self.perceptron.feed(
                        [self.randominputsx[k], self.randominputsy[k]]))
                # get the expected answers
                expected.append(
                    upp([self.randominputsx[k], self.randominputsy[k]]))
                if perceptronanswers[k] == expected[k]:
                    aciertos += 1
            aciertos = aciertos / self.points
            final.append(aciertos)

        plt.plot(trainings, final)
        plt.ylim((0, 1))
        plt.ylabel("Percentage of correct answers")
        plt.xlabel("# Training passes")
        plt.title("Learning Curve")
        plt.show()
Example #29
 def test_delta(self):
     p = Perceptron(0.1, [0.2, 0.3], 0.5)
     self.assertEqual(1., p.get_delta())
     p.adjust_delta(1)
     self.assertEqual(0.0, p.get_delta())
     p.set_delta(0.3)
     self.assertEqual(0.3, p.get_delta())
Example #30
def guess_faces(dataset):
    i_dataset = dataset[:15, :]
    k_dataset = dataset[15:29, :]
    c_dataset = dataset[29:, :]
    ck_dataset = shuffle(np.vstack((c_dataset, k_dataset)))
    ik_dataset = shuffle(np.vstack((i_dataset, k_dataset)))
    c_dataset[:, -1] = np.zeros(16)
    ic_dataset = shuffle(np.vstack((i_dataset, c_dataset)))

    ik_tr_dataset = ik_dataset[20:, :]
    ik_te_dataset = ik_dataset[:20, :]
    ck_tr_dataset = ck_dataset[20:, :]
    ck_te_dataset = ck_dataset[:20, :]
    ic_tr_dataset = ic_dataset[21:, :]
    ic_te_dataset = ic_dataset[:21, :]

    P_ik = Perceptron(.1, 35 * 35)
    P_ic = Perceptron(.1, 35 * 35)
    P_ck = Perceptron(.1, 35 * 35)

    te_dataset = np.vstack((ik_te_dataset, ic_te_dataset, ck_te_dataset))
    correct = 0
    for x in te_dataset:
        output1, net1 = P_ik.predict(x)
        output2, net2 = P_ic.predict(x)
        output3, net3 = P_ck.predict(x)
        d = {net1: output1, net2: output2, net3: output3}

        if d[max(net1, net2, net3)] == x[-1]:
            correct += 1

    print("3 class image predictor success rate: ",
          100 * correct / te_dataset.shape[0], "%")
Example #31
 def test_weight(self):
     p = Perceptron(0.1, [0.2, 0.3], 0.5)
     self.assertTrue(np.allclose(np.asarray([0.2, 0.3]), p.get_weights()))
     p.set_weights(np.asarray([0.4, 0.5]))
     self.assertTrue(np.allclose(np.asarray([0.4, 0.5]), p.get_weights()))
     p.adjust_weight(np.asarray([2, 4]))
     self.assertTrue(np.allclose(np.asarray([1.4, 2.5]), p.get_weights()))
Example #32
def teste_portas_logicas(porta):
    entradas = [[0, 0], [0, 1], [1, 0], [1, 1]]
    saidas = None
    print("Porta {}".format(porta.upper()))
    if porta.upper() == "AND":
        saidas = [0, 0, 0, 1]
    elif porta.upper() == "OR":
        saidas = [0, 1, 1, 1]
    else:
        raise ValueError("unsupported gate: " + porta)

    perceptron = Perceptron(entradas, saidas, 1)
    perceptron.treinar(epochs=100, verbose=False)
    for i in range(len(entradas)):
        print(perceptron.calcula_saida(entradas[i]))
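Calling it once per supported gate:

teste_portas_logicas("and")
teste_portas_logicas("or")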
Example #33
 def testDecisionBoundary(self):
     print("\n\n>> Display the decision boundary to classify an iris in one of the two classes: setosa, versicolor")
      df = self.loadIrisData()
      # build training set with 100 records: the label is in column 5
      y = df.iloc[0:100, 4].values
      # transform the classes to numerical values
      y = np.where(y == 'Iris-setosa', -1, 1)
      # take columns 0 and 2: sepal and petal length as variables
      X = df.iloc[0:100, [0, 2]].values
     ppn = Perceptron(0.01,10)
     ppn.fit(X,y)
     tool.displayDecisionRegions( X, y, classifier = ppn,
     xlabel='sepal length [std]',ylabel='petal length [std]',label0='Setosa',label1='Versicolor')
Example #34
def main():

	filename = '../TestData/Perceptron/irisFull.arff'

	instances, num_classes, num_classes_per_feature, feature_names, feature_class_names, class_names, attribute_types = read_set(filename)
	training_set = instances
	test_set = instances

	setosaP = Perceptron(threshold=0, learningRate=.1, specifiedTarget=0)
	versicolorP = Perceptron(threshold=0, learningRate=.1, specifiedTarget=1)
	virginicaP = Perceptron(threshold=0, learningRate=.1, specifiedTarget=2)


	setosaP.train(training_set)
	versicolorP.train(training_set)
	virginicaP.train(training_set)

	algorithms = [setosaP, versicolorP, virginicaP]

	accuracy, confusion_matrix = iris_test(algorithms, test_set, num_classes, specifiedGoal=0)
	print("accuracy: %.3f%%" % accuracy)
	print(confusion_matrix)
Example #35
def main(argv):
    train_dir = argv[0]
    test_dir = argv[1]

    try:
        train_file = open(train_dir, 'r')
        test_file = open(test_dir, 'r')
    except IOError:
        print("Error: file doesn't open")
        print("Usage: python NER.py trainfile testfile")
        exit(0)

    print('Perceptron starting...\nTraining File: %s\nTesting File %s' % (train_dir, test_dir))
    
    model = Perceptron(1)
    model.read_data(train_file, test_file)

    train_file.close()
    test_file.close()

    model.computeFeatures()

    model.train()
    model.test()
Example #36
def runAlgo(algo, num_of_labels, iterations, train_matrix, test_matrix):
    start = time.time()
    if algo == "perceptron":
        if iterations is None:
            print("iterations required for perceptron")
            return
        elif iterations < 1:
            print("more than 1 iteration required")
            return
        p = Perceptron(train_matrix, num_of_labels, test_matrix, iterations)
    elif algo == "naivebayes":
        p = Naive_Bayes(train_matrix, num_of_labels, test_matrix)
    elif algo == "mira":
        if iterations is None:
            print("iterations required for mira")
            return
        elif iterations < 1:
            print("more than 1 iteration required")
            return
        p = MIRA(train_matrix, num_of_labels, test_matrix, iterations)
    else:
        print("algo not found")
        return

    p.preprocess()
    p.train_model()
    testpredictions = p.test_model()
    correct = 0
    incorrect = 0
    for prediction, label in zip(testpredictions, test_data_labels):
        if prediction == label:
            correct += 1
        else:
            incorrect += 1
        # print("%s\t%s" % (prediction, label))
    print("Final: %s" % (float(correct) / (correct + incorrect)))
    print("Total Time:%s sec" % (time.time() - start))
Example #37
File: main.py Project: frenos/MeMl
number_of_testdata = int(np.round(len(train_data_joggen) * 0.3))
# split all values into disjoint test and train data
test_data_joggen = train_data_joggen[:number_of_testdata]
train_data_joggen = train_data_joggen[number_of_testdata:]


train_data = np.concatenate((train_data_joggen, train_data_stehen), axis=0)

#myPerceptron = Perceptron(train_data, False)
#myPerceptron.pocket(200)
#generate_statistics(myPerceptron.check,myPerceptron.weight,test_data_stehen,test_data_joggen)

#myPerceptron.plot(vec=myPerceptron.weight)
#myPerceptron.pocket(300)

myPerceptron = Perceptron(train_data, False)
myPerceptron.pla(20)
myPerceptron.plot(vec=myPerceptron.weight,save=True)
generate_statistics(myPerceptron.check,myPerceptron.weight,test_data_stehen,test_data_joggen)

myPerceptron.pocket(300)

#myKNN = NearestNeighbors(traindata=train_data)
#generate_statistics(myKNN.check,0,test_data_stehen,test_data_joggen)

print("bla")
################
#for myvalue in train_data:
#    if not myPerceptron.check_no_Weight(myvalue):
#        print("nichts klar beim pla")
Example #38
def main(argv):

	# for debugging
	random.seed(1)

	# Handle User input
	trainFile = ''
	n = 1         # default to training on the whole set
	l = .01
	e = 1

	if len(sys.argv) == 5:
		trainFile = sys.argv[1]
		n = int(sys.argv[2])
		l = float(sys.argv[3])
		e = int(sys.argv[4])
	else:
		sys.exit("Bad input: Please provide a test file, number of folds, \
		 learning rate, and training epochs")


	# read in dataset
	dset = readFile(trainFile)

	# split the dataset into stratified folds
	allPos = [inst for inst in dset.instances if inst[-1] == dset.labels[1]]
	allNeg = [inst for inst in dset.instances if inst[-1] == dset.labels[0]]
	numPosPerSet = int(round(float(len(allPos))/n))
	numNegPerSet = int(round(float(len(allNeg))/n))
	
	
	# a better way of assigning folds
	foldAssignList = [0 for i in range(len(dset.instances))]
	currentFold = 1
	foldSize = 0
	for i in range(len(dset.instances)):
		if foldSize >= numPosPerSet:
			currentFold +=1
			foldSize = 0
		if currentFold > n:
			currentFold = n
		if dset.instances[i][-1] == dset.labels[1]:
			foldAssignList[i] = currentFold
			foldSize += 1

	currentFold = 1
	foldSize = 0
	for i in range(len(dset.instances)):
		if foldSize >= numNegPerSet:
			currentFold +=1
			foldSize = 0
		if currentFold > n:
			currentFold = n
		if dset.instances[i][-1] == dset.labels[0]:
			foldAssignList[i] = currentFold
			foldSize += 1
	
	folds = []
	for i in range(n):
		folds.append([dset.instances[j] for j in range(len(dset.instances)) if foldAssignList[j]-1 == i])


	netList = []
	for j in range(len(folds)):
		testFold = j
		trainSet = []
		for i in range(len(folds)):
			if i != testFold:
				trainSet.extend(folds[i])

		nnet = Perceptron(trainSet, dset.attributes, dset.labels, l, e, .1)
		netList.append(nnet)

	
	# classify all of the instances
	for i in range(len(dset.instances)):
		fold = foldAssignList[i]
		nnet = netList[fold-1]
		out = nnet.calculateOutput(dset.instances[i])
		ind = 1 if out > .5 else 0
		classOut = dset.labels[ind]
		actual = dset.instances[i][-1]
		print("fold: " + str(fold) + ", predicted: " + classOut + ", actual: " + actual + ", confidence: " + str(out))
Example #39
from Perceptron import Perceptron
from DatasetLoader import DatasetLoader
from pprint import pprint

dataset = DatasetLoader('resources/dataset.csv')

p = Perceptron()

# pop = p.create_initial_population();
# t, f = p.sort_by_best(pop, dataset.x, dataset.y)
# print(t)
# print(f)

# pop = p.select(pop, dataset.x, dataset.y)
# [print(i) for i in pop]

# pop = p.crossover(pop)
# [print(i) for i in pop]

# t, f = p.sort_by_best(pop, dataset.x, dataset.y)
# print(t)
# print(f)


# pop = p.mutate(pop)
# [print(i) for i in pop]

# t, f = p.sort_by_best(pop, dataset.x, dataset.y)
# print(t)
# print(f)
# p.maxError = 0
Example #40
from Perceptron import Perceptron

weights = [-8.474109968136872, 3.525175929701409, 0.3503913051612264]
Brain = Perceptron(3, weights)

Brain.setInputs([1,3,-1])
print(str(Brain.getDeterminationInEnglish()))
Example #41
from Perceptron import Perceptron
from DatasetLoader import DatasetLoader

dataset = DatasetLoader('resources/dataset.csv')

p = Perceptron()
pop = p.create_initial_population()
print(p.sort_by_best(pop, dataset.x, dataset.y))

#p.fit(dataset.x, dataset.y)


print(p.w)
print(p.w0)
Example #42
class TestPerceptron(unittest.TestCase):
    def setUp(self):
        self.perceptron = Perceptron()
    
    
    def tearDown(self):
        del self.perceptron
    

    def test_setAllInputs(self):
        values = [0,1,0]
        self.perceptron.setAllInputs(values)
        self.assertEqual(self.perceptron.inputs, values)


    def test_setInput(self):
        self.perceptron.setAllInputs([0,0,0])
        n = 1
        value = 1
        self.perceptron.setInput(n, value)
        self.assertEqual(self.perceptron.inputs[n], value)
    
        
    def test_setAllWeights(self):
        values = [0,1,0]
        self.perceptron.setAllWeights(values)
        self.assertEqual(self.perceptron.weights, values)


    def test_setWeight(self):
        self.perceptron.setAllWeights([0,0,0])
        n = 1
        value = 1
        self.perceptron.setWeight(n, value)
        self.assertEqual(self.perceptron.weights[n], value)
        
        
    def test_setFunc(self):
        self.perceptron.setFunc(myfunc)
        self.assertTrue(isfunction(self.perceptron.func))
        
        
    def test_calculate(self):
        self.perceptron.setAllInputs([1,1,1])
        self.perceptron.setAllWeights([1,2,3])
        self.perceptron.setFunc(myfunc)
        self.perceptron.calculate()
        self.assertTrue(self.perceptron.output)

         
    def test_get(self):
            
        self.perceptron.setAllInputs([1,1,1])
        self.perceptron.setAllWeights([1,2,3])
        self.perceptron.setFunc(myfunc)
        self.perceptron.calculate()
        self.assertTrue(self.perceptron.get())

        self.perceptron.setAllWeights([1,2,4])
        self.perceptron.calculate()
        self.assertFalse(self.perceptron.get())
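myfunc is not shown, but the assertions pin its behavior down: a weighted sum of 6 (inputs [1,1,1], weights [1,2,3]) must map to a truthy output, and a sum of 7 (weights [1,2,4]) to a falsy one. One threshold function consistent with that (the cutoff is an assumption):

def myfunc(weighted_sum):
    # Truthy below 7, falsy at 7 and above - matches test_calculate/test_get.
    return weighted_sum < 7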
Example #43
def main():
    filename = '../TestData/Perceptron/linearlySeparable.arff'
    dataset_type = DatasetType.training
    algorithm_name = 'perceptron'  # default when no argument is given
    if len(sys.argv) > 1:
        algorithm_name = sys.argv[1]

    if len(sys.argv) > 2:
        filename = sys.argv[2]

    if len(sys.argv) > 3:
        # dataset_type = DatasetType[sys.argv[3]]
        dataset_type = DatasetType.fromstring(sys.argv[3])
        print(dataset_type)

    if dataset_type is DatasetType.training:
        instances, num_classes, num_classes_per_feature, feature_names, feature_class_names, class_names, attribute_types = read_set(filename)
        training_set = instances
        test_set = instances

    elif dataset_type is DatasetType.random:
        instances, num_classes, num_classes_per_feature, feature_names, feature_class_names, class_names, attribute_types = read_set(filename)
        percent_for_training = float(sys.argv[4])
        random.shuffle(instances)
        split_idx = int(percent_for_training * len(instances))
        training_set = instances[0:split_idx]
        test_set = instances[split_idx:-1]

    elif dataset_type is DatasetType.static:
        training_set, num_classes, num_classes_per_feature, feature_names, feature_class_names, class_names, attribute_types = read_set(filename)
        test_set, num_classes, num_classes_per_feature, feature_names, feature_class_names, class_names, attribute_types = read_set(sys.argv[4])

    elif dataset_type is DatasetType.cross:
        data, num_classes, num_classes_per_feature, feature_names, feature_class_names, class_names, attribute_types = read_set(filename)

        random.shuffle(data)

    """
    if algorithm_name == 'multilayer':
        num_features = instances.shape[1] - 1
        algorithm = MultilayerPerceptron((num_features, 16, num_classes), 'classification')
    elif algorithm_name == 'decision':
        algorithm = DecisionTree(num_classes, num_classes_per_feature, feature_names,
                                 feature_class_names, class_names)
    """
    if algorithm_name == 'perceptron':
        algorithm = Perceptron(threshold=0, learningRate=.1)
    elif algorithm_name == 'backprop':
        algorithm = BackPropNode(hidden_node_multiplier=6, rand_weights=True, learning_rate=0.3, momentum=0.2, num_outputs=2)
    elif algorithm_name == 'decisiontree':
        algorithm = DecisionTree(debug=False, validation=False)
    elif algorithm_name == 'knn':
        algorithm = NearestNeighbor(k=7, distance_weighting=False, normalize=True)
    elif algorithm_name == 'knn_regression':
        algorithm = NearestNeighbor(k=3, regression=True, distance_weighting=True, normalize=True)
    elif algorithm_name == 'knn_mixed':
        algorithm = NearestNeighbor(k=13, distance_weighting=False, attribute_types=attribute_types)
    elif algorithm_name == 'cluster' or algorithm_name == 'cluster_mult':
        algorithm = Cluster(k=2, normalize=False)

    if algorithm_name =='knn':
        accuracies = []
        k_values = arange(1,16, 2)
        algorithm.train(training_set)

        import numpy as np

        for k in k_values:
            algorithm.k = k
            accuracy, confusion_matrix = test(algorithm, test_set, num_classes, normalize=True)
            print("k: %d, accuracy: %.3f%%" % (k, accuracy))
            accuracies.append(accuracy)

        figure()
        print "Accuracies: ", accuracies
        plot(k_values, accuracies)
        xticks(k_values)
        title('Magic Telescope Test Set Accuracy')
        xlabel('k')
        ylabel('Accuracy')

        show()

    elif algorithm_name == 'knn_regression':
        mses = []
        k_values = arange(1, 16, 2)

        algorithm.train(training_set)

        for k in k_values:
            algorithm.k = k
            mse = test_continuous(algorithm, test_set, normalize=True)
            print("k: %d, MSE: %.3f" % (k, mse))
            mses.append(mse)

        figure()
        plot(k_values, mses)
        xticks(k_values)
        title('Housing Price Prediction Test Error')
        xlabel('k')
        ylabel('MSE')

        show()

    elif algorithm_name == 'cluster':
        sses = []

        for x in range(5):
            algorithm.k = 4
            algorithm.centroids = []
            algorithm.train(training_set)
            sse = test_continuous(algorithm, test_set, normalize=False, only_sse=True)
            sses.append( sse )
            print("k: %d, SSE: %.3f" % (algorithm.k, sse))

        figure()
        plot(range(5), sses)
        xticks(range(5))
        title('Iris SSE Test Error - K = 4')
        xlabel('Iteration')
        ylabel('SSE')

        show()

    elif algorithm_name == 'cluster_mult':

        k_values = arange(2,8,1)
        sses = []

        for k in k_values:
            algorithm.k = k
            algorithm.centroids = []
            algorithm.train(training_set)
            sse = test_continuous(algorithm, test_set, normalize=False, only_sse=True)
            print("k: %d, SSE: %.3f" % (algorithm.k, sse))
            sses.append(sse)
        
        figure()
        plot(k_values, sses)
        xticks(k_values)
        title('Iris SSE Test ErrorTT')
        xlabel('k')
        ylabel('SSE')

        show()


    elif algorithm_name == 'knn_mixed':
        accuracies = []
        # k_values = arange(1, 16, 2)
        test_accuracies = cross_validate(algorithm, data, num_classes, num_folds=10, return_training_accuracy=False)

        print("Test: %s" % test_accuracies)
        print("Test Average Accuracy: %.3f%%" % average(test_accuracies))

    elif dataset_type != DatasetType.cross:
        algorithm.train(training_set)
        targets = instances[:, -1]
        accuracy, confusion_matrix = test(algorithm, test_set, num_classes)
        print("accuracy: %.3f%%" % accuracy)
        print(confusion_matrix)

    else:
        training_accuracies, test_accuracies = cross_validate(algorithm, data, num_classes, num_folds=10)

        print("Training: %s" % training_accuracies)
        print("Training Average Accuracy: %.3f%%" % average(training_accuracies))

        print("Test: %s" % test_accuracies)
        print("Test Average Accuracy: %.3f%%" % average(test_accuracies))

    if algorithm_name == "perceptron":
        figure()

        classDict = OrderedDict()
        for row in test_set:
            if row[-1] not in classDict:
                classDict[row[-1]] = {"x": [], "y": [], "color": np.random.rand(3), "marker": np.random.randint(3, 8)}
            classDict[row[-1]]["x"].append(row[0])
            classDict[row[-1]]["y"].append(row[1])
Example #44
 def setUp(self):
     self.perceptron = Perceptron()
Example #45
print(irisDF["class"].value_counts())

### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
X = irisDF.iloc[0:100,[0,2]].values
y = irisDF.iloc[0:100,    4].values
y = np.where(y == 'Iris-setosa', -1, 1)

### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
figFILE = os.path.join(dir_output,'iris-scatter.png')
fig, ax = plt.subplots(1, 1)
ax.scatter(X[   :50,0], X[   :50,1], color = 'red',  marker = 'o', label = 'setosa'    )
ax.scatter(X[50:100,0], X[50:100,1], color = 'blue', marker = 'x', label = 'versicolor')
fig.savefig(figFILE)

### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
ppn = Perceptron(eta = 0.1, n_iter = 10)
ppn.fit(X,y)

figFILE = os.path.join(dir_output,'perceptron-error-epoch.png')
fig, ax = plt.subplots(1, 1)
ax.plot(
    range(1,len(ppn.errors_)+1),
    ppn.errors_,
    marker = 'o'
    )
ax.set_xlabel('epoch')
ax.set_ylabel('number of misclassifications')
fig.savefig(figFILE)

figFILE = os.path.join(dir_output,'decision-regions.png')
plotDecisionRegions(
Example #46
ecSampleNum = 20
graph = False


for i in range(0,rounds):
    iterCount = 1
    g = Generator(lowerx=searchSpace[0],
                  upperx=searchSpace[1],
                  lowery=searchSpace[2],
                  uppery=searchSpace[3],
                  sampleSize=sampleSize
                )
    tSlope,tIntercept = g.getLine()
    trueFunction = map(lambda x: tSlope*x+tIntercept, range(rd[0],rd[1]+1))

    p = Perceptron(g.getSamples(),weights)

    xCoords = p.getXCoords()
    yCoords = p.getYCoords()
    colors = p.getColors()

    x = range(rd[0],rd[1]+1)
    y_val = map(lambda x : 0 ,range(rd[0],rd[1]+1))

    if graph:
        plt.ion()
        fig = plt.figure()
        ax = plt.subplot()
        ax.axis(rd)
        ax.axhline(0, color='black')
        ax.axvline(0, color='black')
Example #47
 def __init__(self, input_num):
     '''Initialize the linear unit: set the number of input parameters'''
     Perceptron.__init__(self, input_num, f)
Example #48
def main(argv):

	# for debugging
	random.seed(1)

	# Handle User input
	trainFile = ''
	n = 1         # default to training on the whole set
	l = .01
	e = 1

	if len(sys.argv) == 5:
		trainFile = sys.argv[1]
		n = int(sys.argv[2])
		l = float(sys.argv[3])
		e = int(sys.argv[4])
	else:
		sys.exit("Bad input: Please provide a test file, number of folds, \
		 learning rate, and training epochs")


	# read in dataset
	dset = readFile(trainFile)

	# split the dataset into stratified folds
	allPos = [inst for inst in dset.instances if inst[-1] == dset.labels[1]]
	allNeg = [inst for inst in dset.instances if inst[-1] == dset.labels[0]]
	numPosPerSet = int(round(float(len(allPos))/n))
	numNegPerSet = int(round(float(len(allNeg))/n))
	

	# a better way of assigning folds
	foldAssignList = [0 for i in range(len(dset.instances))]
	currentFold = 1
	foldSize = 0
	for i in range(len(dset.instances)):
		if foldSize >= numPosPerSet:
			currentFold +=1
			foldSize = 0
		if currentFold > n:
			currentFold = n
		if dset.instances[i][-1] == dset.labels[1]:
			foldAssignList[i] = currentFold
			foldSize += 1

	currentFold = 1
	foldSize = 0
	for i in range(len(dset.instances)):
		if foldSize >= numNegPerSet:
			currentFold +=1
			foldSize = 0
		if currentFold > n:
			currentFold = n
		if dset.instances[i][-1] == dset.labels[0]:
			foldAssignList[i] = currentFold
			foldSize += 1
	
	folds = []
	for i in range(n):
		folds.append([dset.instances[j] for j in range(len(dset.instances)) if foldAssignList[j]-1 == i])


	
	# for fold in folds:
	# 	posCount = 0
	# 	negCount = 0
	# 	for inst in fold:
	# 		if inst[-1]=="Mine": 
	# 			posCount+=1
	# 		else:
	# 			negCount+=1
	# 	print("FoldPos: " + str(posCount) + ", FoldNeg: " + str(negCount))


	# generate txt file for use in graphing answer to number 5
	f = open("question5.csv", "w+")
	f.write("numEpochs,Accuracy,set\n")
	
	epochList = [1,10,100,1000]
	for epoch in epochList:
		testAccs = []
		trainAccs = []
		for j in range(len(folds)):
			testFold = j
			trainSet = []
			for i in range(len(folds)):
				if i != testFold:
					trainSet.extend(folds[i])

			nnet = Perceptron(trainSet, dset.attributes, dset.labels, l, epoch, .1)

			# classify the test instances
			testCountCorrect = 0
			for inst in folds[testFold]:
				out = nnet.calculateOutput(inst)
				ind = 1 if out > .5 else 0
				if inst[-1] == dset.labels[ind]:
					testCountCorrect += 1
			testAcc = testCountCorrect / float(len(folds[testFold]))
			testAccs.append(testAcc)

			# classify the training instances
			trainCountCorrect = 0
			for inst in trainSet:
				out = nnet.calculateOutput(inst)
				ind = 1 if out > .5 else 0
				if inst[-1] == dset.labels[ind]:
					trainCountCorrect += 1
			trainAcc = trainCountCorrect / float(len(trainSet))
			trainAccs.append(trainAcc)

		# get the average accuracy across folds for this epoch count
		trainAcc = sum(trainAccs) / len(trainAccs)
		testAcc = sum(testAccs) / len(testAccs)
		f.write(str(epoch) +','+ str(trainAcc) +',train\n')
		f.write(str(epoch) +','+ str(testAcc)  +',test\n')
Example #49
from Perceptron import Perceptron
import numpy as np

# Initialize Input Parameters
inputs = np.array([50, -12, 1])

# Feedforward Perceptron Test
print('Initial Feedforward Testing')
p = Perceptron(len(inputs))             # Initialize Perceptron Instance
result = p.feedforward(inputs)          # Feed input into perceptron

# Display Results
print('Input: ' + str(inputs))
print('Answer: ' + str(result))
print()

# Training our Perceptron
# Perceptron Parameters
inputs = np.array([[50, -12, 1], [10, 10, 1], [-10, 30, 1]])
labels = np.array([1, -1, 1])

# Training
p2 = Perceptron(len(inputs))
result = p2.train(inputs, labels)

# Testing
test = np.array([[30, -10, 1], [9, 13, 1], [-12, 22, 1]])

print('Testing #1: ' + str([30, -10, 1]))
print('Result #1: ' + str(p2.feedforward([30, -10, 1])))
Example #50
def main(argv):

	# for debugging
	random.seed(1)

	# Handle User input
	trainFile = ''
	n = 1         # default to training on the whole set
	l = .01
	e = 1

	if len(sys.argv) == 5:
		trainFile = sys.argv[1]
		n = int(sys.argv[2])
		l = float(sys.argv[3])
		e = int(sys.argv[4])
	else:
		sys.exit("Bad input: Please provide a test file, number of folds, \
		 learning rate, and training epochs")


	# read in dataset
	dset = readFile(trainFile)

	# split the dataset into stratified folds
	allPos = [inst for inst in dset.instances if inst[-1] == dset.labels[1]]
	allNeg = [inst for inst in dset.instances if inst[-1] == dset.labels[0]]
	numPosPerSet = int(round(float(len(allPos))/n))
	numNegPerSet = int(round(float(len(allNeg))/n))
	
	
	# a better way of assigning folds
	foldAssignList = [0 for i in range(len(dset.instances))]
	currentFold = 1
	foldSize = 0
	for i in range(len(dset.instances)):
		if foldSize >= numPosPerSet:
			currentFold +=1
			foldSize = 0
		if currentFold > n:
			currentFold = n
		if dset.instances[i][-1] == dset.labels[1]:
			foldAssignList[i] = currentFold
			foldSize += 1

	currentFold = 1
	foldSize = 0
	for i in range(len(dset.instances)):
		if foldSize >= numNegPerSet:
			currentFold +=1
			foldSize = 0
		if currentFold > n:
			currentFold = n
		if dset.instances[i][-1] == dset.labels[0]:
			foldAssignList[i] = currentFold
			foldSize += 1
	
	folds = []
	for i in range(n):
		folds.append([dset.instances[j] for j in range(len(dset.instances)) if foldAssignList[j]-1 == i])


	netList = []
	for j in range(len(folds)):
		testFold = j
		trainSet = []
		for i in range(len(folds)):
			if i != testFold:
				trainSet.extend(folds[i])

		nnet = Perceptron(trainSet, dset.attributes, dset.labels, l, e, .1)
		netList.append(nnet)

	
	# classify all of the instances
	# get tuple list of outputs and actual classes
	rocList = []
	for i in range(len(dset.instances)):
		fold = foldAssignList[i]
		nnet = netList[fold-1]
		out = nnet.calculateOutput(dset.instances[i])
		ind = 1 if out > .5 else 0
		actual = dset.instances[i][-1]
		rocList.append((out, actual))

	sortedRoc = sorted(rocList, key=lambda tup: tup[0])
	sortedRoc.reverse()

	
	# create plot coordinates for an ROC curve and write those a csv
	f = open("question6.csv", "w")
	f.write("FPR,TPR\n")
	numPos = len(allPos)
	numNeg = len(allNeg)
	TP = 0
	FP = 0
	lastTP = 0
	neg = dset.labels[0]
	for i in range(1,len(sortedRoc)):
		if (sortedRoc[i][0] != sortedRoc[i-1][0]) and (sortedRoc[i][1] == neg) and (TP > lastTP):
			TPR = TP / float(numPos)
			FPR = FP / float(numNeg)
			f.write(str(FPR) + "," + str(TPR) + "\n")
			lastTP = TP
		if sortedRoc[i][1] == dset.labels[1]:
			TP += 1
		else:
			FP += 1
	TPR = TP / float(numPos)
	FPR = FP / float(numNeg)
	f.write(str(FPR) + "," + str(TPR) + "\n")	

	f.close()
Example #51
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
df.tail()


# extract data corresponding to 50 iris setosa and 50 iris versicolor flowers,
# convert into 2 class labels: 1 (versicolor) and -1 (setosa), and create a
# scatterplot

y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1,1)
x = df.iloc[0:100, [0,2]].values
plt.scatter(x[:50, 0], x[:50, 1], color = 'red', marker = 'o', label = 'setosa')
plt.scatter(x[50:100, 0], x[50:100, 1], color = 'blue', marker = 'x', label = 'versicolor')
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.legend(loc = 'upper left')
plt.show()

#train perceptron alg on dataset

ppn = Perceptron(eta = 0.1, n_iter = 10)
ppn.fit(x, y)
plt.plot(range(1, len(ppn.errors_)+ 1), ppn.errors_, marker = 'o')
plt.xlabel('Epochs')
plt.ylabel('Number of misclassifications')
plt.show()
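Examples #4, #33, #45, and this one all fit the same Perceptron(eta, n_iter) interface from Raschka's Python Machine Learning; a sketch close to the book's implementation:

import numpy as np

class Perceptron:
    def __init__(self, eta=0.01, n_iter=10):
        self.eta = eta        # learning rate
        self.n_iter = n_iter  # passes over the training set

    def fit(self, X, y):
        self.w_ = np.zeros(1 + X.shape[1])  # weights, with the bias at w_[0]
        self.errors_ = []                   # misclassifications per epoch
        for _ in range(self.n_iter):
            errors = 0
            for xi, target in zip(X, y):
                update = self.eta * (target - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        return np.where(self.net_input(X) >= 0.0, 1, -1)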