Example #1
import pickle

def save():
    # Fit a KNN model on the available data and persist it to disk so it
    # can be reloaded later without retraining.
    data = get_data()

    model = KNN()
    model.fit(data)

    with open('model.pkl', 'wb') as f:
        pickle.dump(model, f)
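A minimal counterpart for reading the pickled model back, sketched under the assumption that the same 'model.pkl' path is used (the load helper itself is not part of the original example):

import pickle

def load(path='model.pkl'):
    # Hypothetical loader matching save() above.
    with open(path, 'rb') as f:
        return pickle.load(f)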
Example #2
def predict_knn(filepath, cells):
    # Load a pre-trained KNN from disk and classify each cell, collecting the
    # predictions into a single grid string.
    knn = KNN(3, train=False)
    knn.load_knn(filepath)
    grid = ""
    for cell in cells:
        cell = cell.reshape(1, -1)
        pred = knn.predict(cell)
        # period character was used as the wildcard value, but on the backend
        # the model is trained to predict a 0, so it is remapped.
        if pred == 0:
            pred = "."
        grid += str(pred)
    return grid
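A hypothetical call to predict_knn, assuming cells is a sequence of fixed-size cell images and that the trained model was saved to a path like 'model.pkl' (both are illustrative, not from the original):

import numpy as np

# 81 dummy 28x28 cells standing in for the cells of a scanned 9x9 grid.
cells = [np.zeros((28, 28)) for _ in range(81)]
print(predict_knn('model.pkl', cells))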
def compare_models(hidden_dim, iterations, input_dim=3, verbose=False):
    # For each model: choose the best regularization strength by
    # cross-validation, then retrain from scratch a few times and report the
    # mean and standard deviation of the test error.
    f1 = Symmetric(input_dim, hidden_dim, hidden_dim, 10)
    f2 = KNN(input_dim, hidden_dim, hidden_dim, 10)
    f3 = KK(input_dim, hidden_dim, hidden_dim, 10)

    f1.__name__ = "S1"
    f2.__name__ = "S2"
    f3.__name__ = "S3"

    models = [f1, f2, f3]
    
    lambs = [0., 1e-6, 1e-4, 1e-2]

    for model in models:
        print("model", model.__name__)
        cv_models = cross_validate(model, train_loader, iterations, lambs, verbose)
        
        validation_errors = np.zeros_like(lambs)
        for i, cv_model in enumerate(cv_models):
            validation_errors[i] = test(cv_model, train_loader)
        
        i = np.argmin(validation_errors)
        lamb = lambs[i]
            
        runs = 3
        run_errors = np.zeros(runs)
        for i in range(runs):
            print("run", i)
            model_copy = copy.deepcopy(model)
            model_copy.reinit()
            train(model_copy, train_loader, iterations, lamb)
            run_errors[i] = test(model_copy, test_loader)
        
        mean_error = np.mean(run_errors)
        std_error = np.std(run_errors)
        
        print("mean: {}, std: {}".format(mean_error, std_error))
Example #4
def main():
    k = 3
    split = 0.8
    header, x_train, y_train, x_test, y_test = KNN.load('IRIS.csv', split)
    knn = KNN(x_train, y_train, k)
    y_pred = knn.test(x_test)
    pprint(y_pred)
    accuracy = KNN.accuracy(y_pred, y_test)
    print(f"Acuracy est de  {accuracy}")
    flower = [6.1, 2.9, 4.7, 1.4]
    prediction = knn.test([flower])
    print(f"fleur  {flower} est un {prediction}")
def main():
    k = 3
    split = 0.8
    header, x_train, y_train, x_test, y_test = KNN.load('iris.csv', split)
    knn = KNN(x_train, y_train, k)
    y_pred = knn.test(x_test)
    pprint(y_pred)
    accuracy = KNN.accuracy(y_pred, y_test)
    print(f"Accuracy est de: {accuracy}")

    flower = [0.8, 0.0, 5, 0]
    prediction = knn.test([flower])
    print(f"La fleur {flower} est un {prediction[0]}")
    train_sampler = SubsetRandomSampler(train_idx)
    val_sampler = SubsetRandomSampler(valid_idx)

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, sampler=val_sampler)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size)

    input_dim = 3
    if args.model == 'overkill':
        model = Overkill(input_dim, args.h1, args.h2, args.h3, 10).to(device)
    elif args.model == 's1':
        model = Symmetric(input_dim, args.h1, args.h2, 10).to(device)
    elif args.model == 's2':
        model = KNN(input_dim, args.h1, args.h2, 10).to(device)
    elif args.model == 's3':
        model = KK(input_dim, args.h1, args.h2, 10).to(device)
            
    train(model, train_loader, args.iterations, lamb=args.lamb, lr=args.lr)
    
    print(args)
            
    error = test(model, train_loader)
    print("train error: ", error)

    error = test(model, val_loader)
    print("val error: ", error)
            
    error = test(model, test_loader)
    print("test error: ", error)
"""
@author: Avinash
"""

from model import KNN
from utils import get_mnist

mnist_data = get_mnist()

# First column holds the label, the remaining columns the pixel values.
X = mnist_data[:, 1:]
Y = mnist_data[:, 0]

# 90/10 train/test split.
X_train = X[:int(X.shape[0] * 0.9)]
Y_train = Y[:int(X.shape[0] * 0.9)]

X_test = X[int(X.shape[0] * 0.9):]
Y_test = Y[int(X.shape[0] * 0.9):]

# X_train /= 255.0

model = KNN()

model.fit(X_train, Y_train, n=5)

# X_test /= 255.0
test_acc = model.evaluate(X_test, Y_test)

print("Test Accuracy : ", test_acc)

'''
Test Accuracy :  0.9671428571428572
'''
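The KNN class imported from model is not included here; a minimal NumPy sketch that would satisfy the fit(X, Y, n=...) / evaluate(X, Y) calls above (an illustrative stand-in, not the author's implementation) is:

import numpy as np

class KNN:
    def fit(self, X, Y, n=5):
        # Lazy learner: remember the training data and the neighbour count.
        self.X, self.Y, self.n = X, Y, n

    def predict(self, x):
        # Majority vote among the n nearest training points (Euclidean distance).
        dists = np.linalg.norm(self.X - x, axis=1)
        nearest = self.Y[np.argsort(dists)[:self.n]]
        values, counts = np.unique(nearest, return_counts=True)
        return values[np.argmax(counts)]

    def evaluate(self, X_test, Y_test):
        preds = np.array([self.predict(x) for x in X_test])
        return float(np.mean(preds == Y_test))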
Example #8
        prompt += f" {next_}"
    print()


def list_predictions(model: Model,
                     prompt: str,
                     num_predictions: int = 5) -> None:
    token_odds = model.predict(prompt)
    for i, item in enumerate(list(token_odds.items())):
        print(f"\t{i + 1}. '{item[0]}' ({item[1]})")
        if i + 1 == num_predictions:
            return


prompt = "The hitch hiker's guide to the"

if __name__ == "__main__":
    print(f"Prompt: {prompt}")

    print("NAIVE")
    m: Model = Naive(2, history=5)
    m.fit(text)
    list_predictions(m, prompt)
    free_write(m, prompt)

    print("KNN")
    m = KNN(2, {Distance.NAIVE, Distance.WU_PALMER})
    m.fit(text)
    list_predictions(m, prompt)
    free_write(m, prompt)
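list_predictions only assumes that model.predict(prompt) returns a mapping from candidate next tokens to their scores; a toy stand-in (purely illustrative, unrelated to the Naive and KNN models above) makes that interface concrete:

class ConstantModel:
    # Toy model: always proposes the same ranked candidates.
    def fit(self, text: str) -> None:
        pass

    def predict(self, prompt: str) -> dict:
        return {"galaxy": 0.7, "universe": 0.2, "stars": 0.1}

list_predictions(ConstantModel(), "The hitch hiker's guide to the")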
Example #9
# -*- encoding: utf-8 -*-
"""
@Description: Implements the original KNN; the loss function defaults to Euclidean distance.
@Time : 2021-3-13 21:03 
@File : main.py 
@Software: PyCharm
"""
from model import KNN
from utils import setup_seed, dataloader
import numpy as np
from sklearn.metrics import accuracy_score

seed = 1234
setup_seed(seed)
x_train, x_test, y_train, y_test = dataloader(ratio=0.9)
knn = KNN(k=5)
test_num = x_test.shape[0]

# train
knn.train(instences=x_train, labels=y_train)
# test
y_pred_list = list()
for instance_id in range(test_num):
    x = x_test[instance_id]
    y_pred = knn.eval(x)
    y_pred_list.append(y_pred)

y_pred_array = np.array(y_pred_list)
print("Final Accuracy: {}%.".format(
    accuracy_score(y_pred_array, y_test) * 100))
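The setup_seed and dataloader helpers come from a utils module that is not shown; a plausible sketch, assuming the dataset is scikit-learn's iris set and that setup_seed only fixes the RNG state (both are assumptions), could be:

import random
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

def setup_seed(seed):
    # Fix Python's and NumPy's RNG state so the split below is reproducible.
    random.seed(seed)
    np.random.seed(seed)

def dataloader(ratio=0.9):
    # Return x_train, x_test, y_train, y_test with `ratio` of the data used for training.
    data = load_iris()
    return train_test_split(data.data, data.target, train_size=ratio)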