Code Example #1
File: test.py Project: m1heng/image-classification
def test_preceptorn(traindata, testdata):
    print("Initializing Perceptron")
    times = int(input("max train time for one data set: "))
    ratio = float(input("learning ratio: "))
    totalnumber = traindata.number
    # first, try with ordered data
    for p in range(10, 101, 10):
        images, labels = traindata.orderedout(p)
        al = []
        il = []
        pc = Perceptron(traindata.width * traindata.height,
                        traindata.labeldomain)
        for i in range(times):
            pc.train(images, labels, 1, ratio)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
            print(a * 100)
        plt.plot(il, al, label="size=%d" % (p * 0.01 * totalnumber))

    leg = plt.legend(ncol=1, shadow=True, fancybox=True)
    leg.get_frame().set_alpha(0.5)
    plt.xlabel("training time")
    plt.ylabel("accuracy")
    plt.show()
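The Accuracy helper referenced throughout these examples is not shown on this page; it presumably returns the fraction of predictions that match the ground-truth labels. A minimal sketch under that assumption:

def Accuracy(predicted, actual):
    # Fraction of test items whose predicted label equals the true label.
    correct = sum(1 for p, t in zip(predicted, actual) if p == t)
    return correct / len(actual)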
Code Example #2
File: test.py Project: m1heng/image-classification
def ops():

    testdata = ImageDataSet(28, 28)
    testdata.loadImageData("digitdata/testimages", -1)
    testdata.loadLabelData("digitdata/testlabels", testdata.number)
    data = ImageDataSet(28, 28)
    data.loadImageData("digitdata/trainingimages", -1)
    data.loadLabelData("digitdata/traininglabels", data.number)
    for t in range(20, 101, 20):
        images, labels = data.shuffleout(t)
        al = []
        il = []
        pc = Perceptron(28 * 28,
                        ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
        for i in range(100):
            pc.train(images, labels, 1, 0.8)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
            print(a * 100)
        plt.plot(il, al, label="size=%d" % (t * 0.01 * data.number))

    leg = plt.legend(ncol=2, shadow=True, fancybox=True)
    leg.get_frame().set_alpha(0.5)
    plt.xlim([1, 100])
    plt.xlabel("training time")
    plt.ylabel("accuracy")

    plt.show()
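The Perceptron class itself is also not included in these excerpts. A multi-class perceptron with a compatible interface (feature count plus label domain in the constructor, train(images, labels, epochs, ratio), classify(images)) might look like the sketch below; this is an illustrative implementation under those assumptions, not the project's actual code:

import numpy as np

class Perceptron:
    def __init__(self, n_features, labeldomain):
        self.labels = list(labeldomain)
        # One weight vector (plus a bias term at index 0) per label.
        self.w = {lab: np.zeros(n_features + 1) for lab in self.labels}

    def _score(self, x, lab):
        return self.w[lab][0] + np.dot(self.w[lab][1:], x)

    def _predict_one(self, x):
        return max(self.labels, key=lambda lab: self._score(x, lab))

    def train(self, images, labels, epochs, ratio):
        for _ in range(epochs):
            for image, truth in zip(images, labels):
                x = np.asarray(image, dtype=float).ravel()
                guess = self._predict_one(x)
                if guess != truth:
                    # Reward the true class, penalize the mistaken guess.
                    self.w[truth][0] += ratio
                    self.w[truth][1:] += ratio * x
                    self.w[guess][0] -= ratio
                    self.w[guess][1:] -= ratio * x

    def classify(self, images):
        return [self._predict_one(np.asarray(image, dtype=float).ravel())
                for image in images]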
Code Example #3
File: test.py Project: m1heng/image-classification
def stdmean():
    limit = 0.7
    ratio = 0.8
    times = 5
    print("digit")
    traindata, testdata = dataloader_digit()
    sal = []
    mal = []
    pal = []

    for p in range(10, 101, 10):
        al = []
        il = []
        for i in range(times):
            images, labels = traindata.shuffleout(p)
            pc = Perceptron(traindata.width * traindata.height,
                            traindata.labeldomain)
            pc.train(images, labels, 3, ratio)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
            print(a * 100)
        sal.append(np.std(al))
        mal.append(np.mean(al))
        pal.append(p)
    plt.plot(pal, sal, label="digitdata Perceptron std")
    plt.plot(pal, mal, label="digitdata Perceptron mean")

    feature_domians = [[i for i in np.arange(0, 1.1, 0.5)]
                       for _ in range(traindata.width * traindata.height)]
    sal = []
    mal = []
    pal = []
    for p in range(10, 101, 10):
        al = []
        for i in range(3):
            images, labels = traindata.shuffleout(p)
            nb = NaiveBayes(feature_domians, traindata.labeldomain, 1)
            nb.train(images, labels)
            x = nb.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
        sal.append(np.std(al))
        mal.append(np.mean(al))
        pal.append(p)
        print(a)
    plt.plot(pal, sal, label="digitdata NaiveBayes std")
    plt.plot(pal, mal, label="digitdata NaiveBayes mean")

    sal = []
    mal = []
    pal = []
    for p in range(10, 101, 10):

        al = []
        il = []
        for i in range(times):
            images, labels = traindata.shuffleout(p)
            pc = NeuralNetwork((traindata.width * traindata.height, 15, 15,
                                len(traindata.labeldomain)),
                               traindata.labeldomain)
            pc.train(images, labels, 50, ratio)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
        sal.append(np.std(al))
        mal.append(np.mean(al))
        pal.append(p)
        print(a)
    plt.plot(pal, sal, label="digitdata NeuralNetwork std")
    plt.plot(pal, mal, label="digitdata NeuralNetwork mean")

    print("face")
    traindata, testdata = dataloader_face()
    sal = []
    mal = []
    pal = []

    for p in range(10, 101, 10):

        al = []
        il = []
        for i in range(times):
            images, labels = traindata.shuffleout(p)
            pc = Perceptron(traindata.width * traindata.height,
                            traindata.labeldomain)
            pc.train(images, labels, 3, ratio)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
            print(a * 100)
        sal.append(np.std(al))
        mal.append(np.mean(al))
        pal.append(p)
    plt.plot(pal, sal, label="facedata Perceptron std")
    plt.plot(pal, mal, label="facedata Perceptron mean")

    feature_domians = [[i for i in np.arange(0, 1.1, 0.5)]
                       for _ in range(traindata.width * traindata.height)]
    sal = []
    mal = []
    pal = []
    for p in range(10, 101, 10):
        al = []

        for i in range(3):
            images, labels = traindata.shuffleout(p)
            nb = NaiveBayes(feature_domians, traindata.labeldomain, 1)
            nb.train(images, labels)
            x = nb.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
        sal.append(np.std(al))
        mal.append(np.mean(al))
        pal.append(p)
        print(a)
    plt.plot(pal, sal, label="facedata NaiveBayes std")
    plt.plot(pal, mal, label="facedata NaiveBayes mean")

    sal = []
    mal = []
    pal = []
    for p in range(10, 101, 10):

        al = []
        il = []

        for i in range(times):
            images, labels = traindata.shuffleout(p)
            pc = NeuralNetwork((traindata.width * traindata.height, 15, 15,
                                len(traindata.labeldomain)),
                               traindata.labeldomain)
            pc.train(images, labels, 50, ratio)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
        sal.append(np.std(al))
        mal.append(np.mean(al))
        pal.append(p)
        print(a)
    plt.plot(pal, sal, label="facedata NeuralNetwork std")
    plt.plot(pal, mal, label="facedata NeuralNetwork mean")

    leg = plt.legend(ncol=1, shadow=True, fancybox=True)
    leg.get_frame().set_alpha(0.5)
    plt.xlabel("data size percentage")
    plt.ylabel("accuracy (%)")
    plt.show()
Code Example #4
File: test.py Project: m1heng/image-classification
def timeana():
    import time
    limit = 0.7
    ratio = 1
    times = 200
    print("digit")
    traindata, testdata = dataloader_digit()
    fal = []
    pal = []

    for p in range(20, 101, 10):
        images, labels = traindata.orderedout(p)
        al = []
        il = []
        start = time.time()
        pc = Perceptron(traindata.width * traindata.height,
                        traindata.labeldomain)
        for i in range(times):
            pc.train(images, labels, 1, ratio)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
            print(a * 100)
            if a > limit:
                break
        # Record elapsed time even if the accuracy limit was never reached.
        end = time.time()
        fal.append(end - start)
        pal.append(p)
    plt.plot(pal, fal, label="digitdata Perceptron")

    feature_domians = [[i for i in np.arange(0, 1.1, 0.5)]
                       for _ in range(traindata.width * traindata.height)]
    fal = []
    pal = []
    for p in range(20, 101, 10):
        start = time.time()
        nb = NaiveBayes(feature_domians, traindata.labeldomain, 1)
        images, labels = traindata.orderedout(p)
        nb.train(images, labels)
        x = nb.classify(testdata.images)
        a = Accuracy(x, testdata.labels)
        end = time.time()
        fal.append(end - start)
        pal.append(p)
        print(a)
    plt.plot(pal, fal, label="digitdata NaiveBayes")

    fal = []
    pal = []
    for p in range(20, 101, 10):
        images, labels = traindata.orderedout(p)
        al = []
        il = []
        start = time.time()
        pc = NeuralNetwork((traindata.width * traindata.height, 15, 15,
                            len(traindata.labeldomain)), traindata.labeldomain)
        for i in range(times):
            pc.train(images, labels, 1, ratio)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
            print(a * 100)
            if a > limit:
                break
        # Record elapsed time even if the accuracy limit was never reached.
        end = time.time()
        fal.append(end - start)
        pal.append(p)
    plt.plot(pal, fal, label="digitdata NeuralNetwork")

    print("face")
    traindata, testdata = dataloader_face()
    fal = []
    pal = []

    for p in range(20, 101, 10):
        images, labels = traindata.orderedout(p)
        al = []
        il = []
        start = time.time()
        pc = Perceptron(traindata.width * traindata.height,
                        traindata.labeldomain)
        for i in range(times):
            pc.train(images, labels, 1, ratio)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
            print(a * 100)
            if a > limit:
                break
        # Record elapsed time even if the accuracy limit was never reached.
        end = time.time()
        fal.append(end - start)
        pal.append(p)
    plt.plot(pal, fal, label="facedata Perceptron")

    feature_domians = [[i for i in np.arange(0, 1.1, 0.5)]
                       for _ in range(traindata.width * traindata.height)]
    fal = []
    pal = []
    for p in range(20, 101, 10):
        start = time.time()
        nb = NaiveBayes(feature_domians, traindata.labeldomain, 1)
        images, labels = traindata.orderedout(p)
        nb.train(images, labels)
        x = nb.classify(testdata.images)
        a = Accuracy(x, testdata.labels)
        end = time.time()
        fal.append(end - start)
        pal.append(p)
        print(a)
    plt.plot(pal, fal, label="facedata NaiveBayes")

    fal = []
    pal = []
    for p in range(20, 101, 10):
        images, labels = traindata.orderedout(p)
        al = []
        il = []
        start = time.time()
        pc = NeuralNetwork((traindata.width * traindata.height, 15, 15,
                            len(traindata.labeldomain)), traindata.labeldomain)
        for i in range(times):
            pc.train(images, labels, 1, ratio)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
            print(a * 100)
            if a > limit:
                break
        # Record elapsed time even if the accuracy limit was never reached.
        end = time.time()
        fal.append(end - start)
        pal.append(p)
    plt.plot(pal, fal, label="facedata NeuralNetwork")

    leg = plt.legend(ncol=1, shadow=True, fancybox=True)
    leg.get_frame().set_alpha(0.5)
    plt.xlabel("data size percentage")
    plt.ylabel("time (in seconds)")
    plt.show()
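The timing pattern above (keep training one epoch at a time until test accuracy clears a threshold, then record the elapsed wall-clock time) can be factored into a small helper. The sketch below uses hypothetical names and a generic classifier/accuracy function, and it also returns a sensible time when the threshold is never reached:

import time

def time_until_accuracy(classifier, images, labels, test_images, test_labels,
                        limit, ratio, max_epochs, accuracy_fn):
    # Wall-clock seconds until test accuracy first exceeds `limit`,
    # or the time for all `max_epochs` passes if it never does.
    start = time.time()
    for _ in range(max_epochs):
        classifier.train(images, labels, 1, ratio)
        predictions = classifier.classify(test_images)
        if accuracy_fn(predictions, test_labels) > limit:
            break
    return time.time() - start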
Code Example #5
File: test.py Project: m1heng/image-classification
def test_preceptorn_argmax_all():
    print("Initializing Perceptron")
    times = 30
    ratio = 1
    traindata, testdata = dataloader_digit()
    # first, try with ordered data
    fal = []
    pal = []
    for p in range(10, 101, 10):
        images, labels = traindata.orderedout(p)
        al = []
        il = []
        pc = Perceptron(traindata.width * traindata.height,
                        traindata.labeldomain)
        for i in range(times):
            pc.train(images, labels, 1, ratio)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
            print(a * 100)
        fal.append(max(al))
        pal.append(p)
    plt.plot(pal, fal, label="digitdata ordered")

    fal = []
    pal = []
    for p in range(10, 101, 10):
        images, labels = traindata.shuffleout(p)
        al = []
        il = []
        pc = Perceptron(traindata.width * traindata.height,
                        traindata.labeldomain)
        for i in range(times):
            pc.train(images, labels, 1, ratio)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
            print(a * 100)
        fal.append(max(al))
        pal.append(p)
    plt.plot(pal, fal, label="digitdata random")

    traindata, testdata = dataloader_face()
    # first, try with ordered data
    fal = []
    pal = []
    for p in range(10, 101, 10):
        images, labels = traindata.orderedout(p)
        al = []
        il = []
        pc = Perceptron(traindata.width * traindata.height,
                        traindata.labeldomain)
        for i in range(times):
            pc.train(images, labels, 1, ratio)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
            print(a * 100)
        fal.append(max(al))
        pal.append(p)
    plt.plot(pal, fal, label="facedata ordered")

    traindata, testdata = dataloader_face()
    # then, try with shuffled data
    fal = []
    pal = []
    for p in range(10, 101, 10):
        images, labels = traindata.shuffleout(p)
        al = []
        il = []
        pc = Perceptron(traindata.width * traindata.height,
                        traindata.labeldomain)
        for i in range(times):
            pc.train(images, labels, 1, ratio)
            x = pc.classify(testdata.images)
            a = Accuracy(x, testdata.labels)
            al.append(a * 100)
            il.append(i + 1)
            print(a * 100)
        fal.append(max(al))
        pal.append(p)
    plt.plot(pal, fal, label="facedata random")

    leg = plt.legend(ncol=1, shadow=True, fancybox=True)
    leg.get_frame().set_alpha(0.5)
    plt.xlabel("data size percentage")
    plt.ylabel("accuracy")
    plt.show()
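orderedout(p) and shuffleout(p) are dataset methods that are not shown here; judging by how they are used, orderedout presumably returns the first p percent of the training set in file order, while shuffleout returns a random p-percent sample. A free-function sketch of that assumed behavior, with hypothetical names:

import random

def orderedout(images, labels, p):
    # First p percent of the data, in its original order.
    n = int(len(images) * p / 100)
    return images[:n], labels[:n]

def shuffleout(images, labels, p):
    # Random p-percent sample of the data.
    n = int(len(images) * p / 100)
    idx = random.sample(range(len(images)), n)
    return [images[i] for i in idx], [labels[i] for i in idx]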
Code Example #6
    # Generating random points based on the target function
    X = generatePoints(100)
    X_with_x0 = [[1] + x for x in X]  # adding the bias term
    y = generateY(func, X)

    w = linear_regression(X_with_x0, y)

    # print(w)

    perc = Perceptron()
    perc._w = w


    # generating out-of-sample points
    X = generatePoints(1000)
    X_with_x0 = [[1] + x for x in X]  # adding the bias term
    y = generateY(func, X)


    # Computing the out-of-sample error
    errorCount = 0
    for i in range(0,len(y)):
        x = X[i]
        if perc.classify(x) != y[i]:
            errorCount += 1
    error = errorCount/len(y)
    errors.append(error)

errors = np.array(errors)
print('Out-of-sample error: ', errors.mean())
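generatePoints and generateY are not defined in this excerpt. In this classic perceptron-on-a-random-line setup they presumably draw uniform points in the square [-1, 1] x [-1, 1] and label each point +1 or -1 by which side of the target line it falls on. A sketch under those assumptions (the evaluate method on the target-function object is hypothetical):

from random import uniform

def generatePoints(n):
    # n random points in the square [-1, 1] x [-1, 1].
    return [[uniform(-1, 1), uniform(-1, 1)] for _ in range(n)]

def generateY(func, X):
    # Label each point by the sign of the target function at that point.
    return [1 if func.evaluate(x) > 0 else -1 for x in X]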
Code Example #7
    y0 = uniform(-1, 1)
    x1 = uniform(-1, 1)
    y1 = uniform(-1, 1)
    func.buildFromPoints(x0, y0, x1, y1)
    # func._print()

    # Generating random points based on the target function
    X = generatePoints(10)
    y = generateY(func, X)

    perc = Perceptron()
    perc.train(X, y)

    # generating new out-of-sample points
    X = generatePoints(1000)
    y = generateY(func, X)

    h = [perc.classify(x) for x in X]

    # print('Target ',y)
    # print('Predicted ',h)

    errorCount = 0
    for i in range(0, len(y)):
        if y[i] != h[i]:
            errorCount += 1

    errProb = float(errorCount) / len(y)
    errors.append(errProb)

print('P(f(x) != g(x)) = ', np.array(errors).mean())
Code Example #8
        return np.concatenate([x0, x1, x2]), np.concatenate(
            [np.zeros(25), np.ones(25), 2 + np.zeros(25)]).astype(int)
    return np.concatenate([x0,
                           x1]), np.concatenate([-np.ones(25),
                                                 np.ones(25)]).astype(int)


x_train, y_train = create_toy_data()
x1_test, x2_test = np.meshgrid(np.linspace(-5, 5, 100),
                               np.linspace(-5, 5, 100))
x_test = np.array([x1_test, x2_test]).reshape(2, -1).T

X_train = polynomialFeaturesTransform(x_train, 1)
X_test = polynomialFeaturesTransform(x_test, 1)

model = Perceptron()
model.fit(X_train, y_train)
y = model.classify(X_test)

y[y == -1] = 0

plt.scatter(x_train[:, 0], x_train[:, 1], c=y_train)
plt.contourf(x1_test,
             x2_test,
             y.reshape(100, 100),
             alpha=0.2,
             levels=np.linspace(0, 1, 3))
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.gca().set_aspect('equal', adjustable='box')
plt.savefig("demo/perceptron.png")
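polynomialFeaturesTransform is assumed to build a polynomial design matrix from the raw features; with degree 1, as used above, that amounts to prepending a constant bias column. A minimal sketch of that assumed behavior:

import itertools
import numpy as np

def polynomialFeaturesTransform(x, degree):
    # Design matrix containing the constant (bias) column plus all monomials
    # of the input features up to the given degree.
    x = np.asarray(x, dtype=float)
    columns = [np.ones(len(x))]
    for d in range(1, degree + 1):
        for combo in itertools.combinations_with_replacement(range(x.shape[1]), d):
            col = np.ones(len(x))
            for i in combo:
                col = col * x[:, i]
            columns.append(col)
    return np.stack(columns, axis=1)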
Code Example #9
with open(training_file) as ftrain, open(testing_file) as ftest:
    training = dp.prepare_data(ftrain)
    testing = dp.prepare_data(ftest)

    labels = dp.prepare_labels(training)
    dp.label_data(training, labels[1])
    dp.label_data(testing, labels[1])

    print(labels)

    print('Training data:')
    for t in training:
        print(t)
    print('Testing data:')
    for t in testing:
        print(t)

    alfa = float(input('Please input learning rate:\n'))
    perceptron = Perceptron(af.step, alfa, len(training[0]) - 1)
    print(perceptron.learn(0.99, training))

    classify_testing_data()

    while True:
        observation = dp.prompt_vector()
        decision = perceptron.classify(observation)
        decision_class = labels[decision]
        print('Decision output: {}\nDecision class: {}'.format(
            decision, decision_class))
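af.step is presumably a unit-step activation used for the binary decision; the module itself is not shown. A minimal sketch under that assumption:

def step(x):
    # Threshold activation: fire (1) for non-negative input, otherwise 0.
    return 1 if x >= 0 else 0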