Code Example #1
def perceptronModel():
    try:
        x_train, y_train, isBiased, learningRate, epochNum, x_test, y_test = modelOperations(
            'perceptron')

        W = perceptron.train(x_train, y_train, isBiased, learningRate,
                             epochNum)

        labels = [firstClassCB.get(), secondClassCB.get()]
        # test() shows the confusion matrix and accuracy.
        perceptron.test(x_test, y_test, W, labels)
    except Exception as e:
        # Report failures instead of silently ignoring them.
        print(f"perceptron model failed: {e}")
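The `perceptron` module these snippets call is not shown anywhere on this page. As a rough reference, here is a minimal sketch of a `train`/`test` pair matching the calls in Example #1; everything inside (NumPy arrays, labels in {-1, +1}, the bias folded in as an extra weight, the printed accuracy) is an assumption, not the project's actual implementation.

import numpy as np


def train(x_train, y_train, isBiased, learningRate, epochNum):
    """Classic perceptron rule: nudge W toward each misclassified sample.

    Assumes y_train labels are in {-1, +1}. If isBiased, a constant-1
    column is appended so the bias is learned as the last weight.
    """
    X = np.asarray(x_train, dtype=float)
    y = np.asarray(y_train)
    if isBiased:
        X = np.hstack([X, np.ones((len(X), 1))])
    W = np.zeros(X.shape[1])
    for _ in range(epochNum):
        for xi, yi in zip(X, y):
            if np.sign(W @ xi) != yi:
                W += learningRate * yi * xi
    return W


def test(x_test, y_test, W, labels):
    X = np.asarray(x_test, dtype=float)
    if X.shape[1] < len(W):  # a bias column was appended during training
        X = np.hstack([X, np.ones((len(X), 1))])
    preds = np.sign(X @ W)
    accuracy = np.mean(preds == np.asarray(y_test))
    print(f"accuracy for {labels}: {accuracy:.2%}")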
Code Example #2
File: q4.py Project: oryband/homework
def simulate_skin(steps=5, max_iter=100, learning_rate=0.1):
    """Simulate learning the skin data set."""
    data = read_data('Skin_NonSkin.txt')
    train_data, test_data = split_list(data, 0.75)

    start = len(train_data) // steps  # First step training set size.
    end = len(train_data)  # Final step training set size.

    sizes = []  # Training data set sizes.
    success = []  # Success rates according to training data set sizes.
    for i in range(steps):
        # Increase training data size according to iteration.
        size = start + i * end // steps
        current_train_data = train_data[:size]

        w = train(current_train_data, max_iter=max_iter, r=learning_rate)
        error = test(test_data, w)

        status(current_train_data, test_data, error)
        print()

        # Record size-success statistics.
        sizes.append(size)
        success.append(100 - error)

    plot_success_per_size(sizes, success)
    show()
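`read_data` and `split_list` are helpers from the same project and are not included in the snippet. From the call `split_list(data, 0.75)`, it splits the data into a 75% training slice and a 25% test slice; a minimal stand-in (the shuffle is an assumption) could be:

import random


def split_list(data, ratio):
    """Split data into two lists at the given ratio (0.75 -> 75% / 25%)."""
    data = list(data)
    random.shuffle(data)  # assumed: randomize before splitting
    cut = int(len(data) * ratio)
    return data[:cut], data[cut:]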
Code Example #3
File: q4.py Project: oryband/homework
def simulate_seperable(data_size):
    """Simulate learning a completely separable data set."""
    data = generate_sphere_data(data_size, margin=0)
    train_data, test_data = split_list(data, 0.75)
    w = train(train_data, max_iter=500, r=0.01)
    error = test(test_data, w)
    status(train_data, test_data, error)

    plot_data(data)
    plot_w(data, w)
    show()
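`generate_sphere_data(n, margin)` is also project-specific. The simulations use margin=0 for the "completely separable" case and margin=0.3 for the non-separable one, so margin evidently controls class overlap. The real geometry in oryband/homework may differ; the following is just one construction consistent with that usage:

import random


def generate_sphere_data(n, margin=0.0, radius=1.0):
    """Sample n points from a disk and label them by the side of the x-axis
    they fall on; points within `margin` of the boundary get a random label,
    so margin=0 is perfectly separable and larger margins overlap.
    Returns (x, y, label) tuples with label in {-1, +1}.
    """
    data = []
    while len(data) < n:
        x = random.uniform(-radius, radius)
        y = random.uniform(-radius, radius)
        if x ** 2 + y ** 2 > radius ** 2:
            continue  # keep only points inside the disk
        if abs(y) < margin:
            label = random.choice((-1, 1))  # overlap region
        else:
            label = 1 if y > 0 else -1
        data.append((x, y, label))
    return data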
Code Example #4
File: q4.py Project: oryband/homework
def simulate_increasing(data_size, margin=0.3, max_iter=100, learning_rate=0.1,
                        steps=5, start=None, end=None):
    """Simulate learning an increasing training data set.

    Generates a non-separable data set, trains on an increasing training
    set, then tests and plots.

    start: Initial (first step) training data set size.
    end: Final (last step) training data set size.
    """
    data = generate_sphere_data(data_size, margin=margin)
    train_data, test_data = split_list(data, 0.75)

    # Initialize start/end sizes if not given.
    start = len(train_data) // steps if start is None else start
    end = len(train_data) if end is None else end

    w_colors = ['b', 'c', 'm', 'y', 'k']  # w vector (line) graph colors.
    w_gs = []  # w plot graphs.
    sizes = []  # Training data set sizes.
    success = []  # Success rates according to training data set sizes.
    for i in range(steps):
        # Increase training data size according to iteration.
        size = start + i * end // steps
        current_train_data = train_data[:size]

        w = train(current_train_data, max_iter=max_iter, r=learning_rate)
        error = test(test_data, w)

        status(current_train_data, test_data, error)
        print()

        # Record size-success statistics.
        sizes.append(size)
        success.append(100 - error)

        # Plot decision boundary.
        w_color = w_colors[i] if i < len(w_colors) else w_colors[-1]
        figure(0)
        g, = plot_w(current_train_data, w, color=w_color)
        w_gs.append(g)

    figure(0).suptitle('Test data size: %d\nMaximum iterations: %d'
                       % (len(test_data), max_iter))
    plot_w_legend(w_gs, sizes)
    plot_data(data)

    figure(1).suptitle('Success rate according to training set size.')
    plot_success_per_size(sizes, success)

    show()
Code Example #5
from sys import argv

import perceptron

# read_data is a project helper (not shown) that returns a dict keyed by
# filter mode ("unfiltered"/"filtered").


def main():
    training = read_data(argv[1])
    test = read_data(argv[2])

    filter_modes = ["unfiltered", "filtered"]
    iteration_limits = [10, 25, 50, 100]
    training_rates = [0.01, 0.05, 0.1, 0.5, 0.8]
    # Grid-search over stop-word filtering, iteration limit, and learning rate.
    for mode in filter_modes:
        for limit in iteration_limits:
            for rate in training_rates:
                weights = perceptron.train(training[mode], limit, rate)
                accuracy = perceptron.test(test[mode], weights) * 100
                print(f"{accuracy:.6f}% accurate:", mode, "stop words,",
                      limit, "iterations,", rate, "training rate")
Code Example #6
File: main.py Project: amary21/bigDataSample
import perceptron
import csvconv

# Inputs
lr = float(input("Input Learning Rate:"))
epochs = int(input("Input Number Of Epochs:"))

# Training
floatdata = csvconv.tofloat('train.csv')
weights = perceptron.train(floatdata, lr, epochs)
print("Weights\n ", weights)

# Testing
floattest = csvconv.tofloat('test.csv')
score = perceptron.test(floattest, weights)
print('Score=', score, '/', len(floattest))
acc = (score / len(floattest)) * 100
print("Accuracy=", acc)
Code Example #7
File: main.py Project: loren3737/NLP-Project
    X_train, Y_train = reviews_to_features(training_set)
    print("Featurized training data.")

    weights, losses = perceptron.train(X_train,
                                       Y_train,
                                       iterations=ITERATIONS,
                                       eta=ETA)
    print("Done training.")

    X_test, Y_test = reviews_to_features(dev_set)
    print("Featurized test data.")

    test_scores = perceptron.score(X_test.T, weights)
    test_sentiments = perceptron.predict(test_scores)
    (accuracy, recall, precision, f1, false_positive_rate,
     false_negative_rate) = perceptron.test(Y_test, test_sentiments)
    print(
        f"Predicted scores w/ threshold = 0.5, iterations = {ITERATIONS}, eta = {ETA}:"
    )
    print(f"  - accuracy      : {accuracy}")
    print(f"  - recall        : {recall}")
    print(f"  - precision     : {precision}")
    print(f"  - f1            : {f1}")
    print(f"  - fpr           : {false_positive_rate}")
    print(f"  - fnr           : {false_negative_rate}")

    if ENABLE_ROC:
        fpr = []
        fnr = []
        thresholds = list(np.round(np.linspace(0, 1, 101), decimals=2))
        for threshold in thresholds:
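The snippet is cut off inside the ROC loop. For reference, here is a self-contained sketch of the kind of threshold sweep it sets up, written with plain NumPy rather than the project's `perceptron` helpers, whose signatures for this step are not shown:

import numpy as np


def roc_points(scores, labels, thresholds):
    """Compute (false-positive rate, false-negative rate) per threshold.

    scores: predicted scores in [0, 1]; labels: 0 or 1.
    """
    scores = np.asarray(scores)
    labels = np.asarray(labels)
    fpr, fnr = [], []
    for t in thresholds:
        preds = (scores >= t).astype(int)
        fp = np.sum((preds == 1) & (labels == 0))
        fn = np.sum((preds == 0) & (labels == 1))
        tn = np.sum((preds == 0) & (labels == 0))
        tp = np.sum((preds == 1) & (labels == 1))
        fpr.append(fp / max(fp + tn, 1))
        fnr.append(fn / max(fn + tp, 1))
    return fpr, fnr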
Code Example #8
import csv
import random

import numpy

import perceptron


def load_csv(filename):
    # csv.reader expects text mode (newline="") in Python 3, not binary mode.
    with open(filename, newline="") as f:
        dataset = list(csv.reader(f))
    random.shuffle(dataset)
    return dataset


inputs = load_csv("iris.csv")
# Note: this initializes one weight per data row; a conventional perceptron
# keeps one weight per feature instead.
weights = [numpy.random.random_sample() for _ in range(len(inputs))]
bias = 0
learning_rate = 0.01

weights = perceptron.train(inputs, weights, bias, learning_rate)
c = perceptron.test(inputs, weights, bias)
print(c)
Code Example #9
import argparse
import pickle

from PIL import Image

import hog
import perceptron

# NOTE: parser creation and the positional image argument are assumed here;
# the excerpt uses args.image, so the full script must define them.
parser = argparse.ArgumentParser()
parser.add_argument("image", help="input image file")
parser.add_argument(
    "-p",
    "--pedestrian",
    help="a pedestrian is present - add this tag if the image contains "
         "a pedestrian and omit the tag if it does not",
    action="store_true")
parser.add_argument("-w",
                    "--weights",
                    help="input file for weights (pickled)",
                    required=True)

args = parser.parse_args()

# Load image, get HOG features
whole_hog = hog.hog(Image.open(args.image))

# Open weights
with open(args.weights, 'rb') as f:
    weights = pickle.load(f)

# Use perceptron: the run succeeds when the prediction matches the
# -p/--pedestrian tag.
result = perceptron.test(whole_hog, weights)
correct = result == args.pedestrian

print("Success" if correct else "Failure")
exit(0 if correct else 1)