Example #1
import torch

# `func` is this project's helper module (argument parsing, checkpoint
# loading, prediction); it is not shown in this snippet.


def main():
    in_arg = func.get_prediction_input_args()
    func.check_command_line_args_prediction(in_arg)

    device = torch.device(
        "cuda" if torch.cuda.is_available() and in_arg.gpu else "cpu")

    model = func.load_checkpoint(in_arg.checkpoint, device)

    probs, labels = func.predict(in_arg.image_path, model, device,
                                 in_arg.category_names, in_arg.top_k)
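The heavy lifting above is delegated to the project's own `func` module, which is not shown. Below is a minimal sketch of what a `predict` helper with this signature typically looks like for a PyTorch image classifier; the preprocessing sizes, the softmax over raw logits, and the `class_to_idx` attribute on the model are assumptions, not part of the original snippet.

import json
import torch
from PIL import Image
from torchvision import transforms


def predict(image_path, model, device, category_names, top_k):
    # Standard ImageNet-style preprocessing (assumed sizes and normalization).
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])
    image = preprocess(Image.open(image_path).convert("RGB")).unsqueeze(0).to(device)

    model.to(device)
    model.eval()
    with torch.no_grad():
        # Assumes the model returns raw logits; softmax turns them into probabilities.
        probs, indices = torch.softmax(model(image), dim=1).topk(top_k, dim=1)

    # Map output indices back to class labels, then to readable names
    # (assumes the checkpoint stored a class_to_idx mapping on the model).
    idx_to_class = {v: k for k, v in model.class_to_idx.items()}
    classes = [idx_to_class[i] for i in indices[0].tolist()]

    with open(category_names) as f:
        cat_to_name = json.load(f)
    labels = [cat_to_name[c] for c in classes]

    return probs[0].tolist(), labels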
Example #2
def get_recommendation():
    """Show a game the user would probably like"""

    games_info = []
    # count how many games the user has rated
    user_id = session["user_id"]
    num_games = (db.session.query(func.count(
        Rating.user_id)).filter(Rating.user_id == user_id).first())
    num_games = int(num_games[0])

    ratings = db.session.query(Rating.score).filter_by(user_id=user_id).all()
    ratings = [rating[0] for rating in ratings]
    # calculate standard deviation
    deviation = standard_deviation(ratings)
    # if this user rates everything the same, send them this error page
    if deviation == 0:
        games_info.append("no")
        return render_template("recommendation.html", games_info=games_info)
    # if the user has rated at least 10 games and doesn't rate everything the same:
    if num_games >= 10 and deviation != 0:
        recommendations = []

        systems = request.args.getlist("systems")
        genres = request.args.getlist("genres")

        # if no systems are checked, use all of the current user's systems
        if not systems:
            # get users' system ids
            user_system = UserSystem.query.filter_by(user_id=user_id).all()
            for i in user_system:
                systems.append(i.system_id)
        # if no genres are checked, use all genres
        if not genres:
            all_genres = Genre.query.all()
            for i in all_genres:
                genres.append(i.genre_id)

        filt_games = (db.session.query(Game.game_id).join(
            GameSystem,
            GameGenre).filter(GameSystem.system_id.in_(systems),
                              GameGenre.genre_id.in_(genres)).all())

        filt_games = [game[0] for game in filt_games]

        for game in filt_games:
            try:
                sims = get_all_similarities(game, users, games, user_id)
            except Exception:
                # if the similarity computation fails, fall back to the basic page
                games_info.append(num_games)
                return render_template("recommendation.html",
                                       games_info=games_info)
            raw_pred = predict(sims, users, game)
            # print "Printing raw_pred"
            # print raw_pred

            if raw_pred > 3.0:
                recommendation = (game, raw_pred)
                recommendations.append(recommendation)
        for rec in recommendations:
            one_game_info = Game.query.filter(Game.game_id == rec[0]).first()
            name = one_game_info.name
            game_id = one_game_info.game_id
            if not one_game_info.covers:
                cover = None
            else:
                cover = one_game_info.covers[0].cover_url

            if not isinstance(rec[1], str):
                percentage = round(rec[1] * 20, 2)
            else:
                percentage = rec[1]
            games_info.append({
                "name": name,
                "percentage": percentage,
                "game_id": game_id,
                "cover": cover
            })
    else:
        games_info.append(num_games)

    return render_template("recommendation.html", games_info=games_info)
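The zero-variance gate above depends on a `standard_deviation` helper that is not shown. A minimal sketch, assuming a plain population standard deviation over the user's rating scores:

import math


def standard_deviation(values):
    # Population standard deviation; 0.0 means every rating is identical.
    mean = sum(values) / len(values)
    variance = sum((v - mean) ** 2 for v in values) / len(values)
    return math.sqrt(variance)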
Example #3
import torchvision
from torchvision import datasets, transforms, models
import argparse

from helper_functions import loadJson
from helper_functions import predict
from helper_functions import loadModel

parser = argparse.ArgumentParser(description="Load NN")
parser.add_argument('data_directory', help="Path for image files")
parser.add_argument('--gpu',
                    default=False,
                    action='store_true',
                    help="Use gpu boolean")
parser.add_argument('--category_names',
                    default='./cat_to_name.json',
                    help="Category file path")
parser.add_argument('--top_k',
                    default=1,
                    type=int,
                    help="Number for likely classes")
parser.add_argument('checkpoint', help="Path for checkpoint")

arguments = parser.parse_args()
categories = loadJson(arguments.category_names)
model = loadModel(arguments.checkpoint)
predict(categories, arguments.data_directory, model, arguments.gpu,
        arguments.top_k)
Example #4
argparser.add_argument('--architecture',
                       action="store",
                       default="vgg11",
                       dest="pretrained_model")

parsed_results = argparser.parse_args()

data_directory = parsed_results.data_directory
checkpoint_dir = parsed_results.load_checkpoint
top_k = int(parsed_results.top_k)
device_flag = bool(parsed_results.use_gpu)
pretrained_model = parsed_results.pretrained_model
categories = parsed_results.categories

#Load & Configure Models

model = getattr(models, pretrained_model)(pretrained=True)
model = load_checkpoint(model, checkpoint_dir)

with open(categories, 'r') as cat_file:
    cat_to_name = json.load(cat_file)

# Predictions and probabilities
probs, classes = predict(data_directory, model, device_flag, top_k)

# Print classes and corresponding probabilities
for prob, cls in zip(probs, classes):
    print("Class is {} & Probability is {}".format(cat_to_name[cls], prob))
Example #5
                    action='store_true',
                    help='For using GPU for prediction')

in_arg = parser.parse_args()
input_image = ''.join(in_arg.input_image)
k = in_arg.top_k
mapping = ''.join(in_arg.category_names)
path = ''.join(in_arg.checkpoint)

if in_arg.gpu and torch.cuda.is_available():
    device = 'cuda'
elif in_arg.gpu and not torch.cuda.is_available():
    print("GPU is not available, so we will do prediction on CPU.")
    device = 'cpu'
else:
    device = 'cpu'

model, criterion, optimizer = helper_functions.load_model(path)
# print("Model is loaded successfully")
with open(mapping, 'r') as json_file:
    cat_to_name = json.load(json_file)

prob, classes = helper_functions.predict(input_image, model, k, device)
# print("Prediction is done successfully")
category = [cat_to_name[str(cls)] for cls in classes]

for i in range(k):
    print("Rank {}: Predicted flower category {} with a probability of {}.".
          format(i + 1, category[i], prob[i]))

# print("Prediction Completed")
Example #6
import pandas as pd
import numpy as np
from helper_functions import create_X, create_y_train, train_model, predict, score

train = pd.read_csv('data/Train.csv', parse_dates=['saledate'])
test = pd.read_csv('data/Test.csv', parse_dates=['saledate'])

X_train = create_X(train)
X_test = create_X(test)
y_train = create_y_train(train)

model = train_model(X_train, y_train)

submit = predict(model, test, X_test, 'model_1')

y_test = pd.read_csv('data/do_not_open/test_soln.csv')

print(score(submit, y_test))  # final score: 0.4102042700770253
Example #7
training_data = []
training_data_labels = []
for record in training_data_list:
    training_data.append(record[0:6])
    training_data_labels.append(record[6])
testing_data = []
testing_data_labels = []
for record in testing_data_list:
    testing_data.append(record[0:6])
    testing_data_labels.append(record[6])

error_matrix = []
for k in range(1, 30):
    correct_classified = 0
    incorrect_classified = 0
    accuracy = 0
    for record, label in zip(testing_data, testing_data_labels):
        nearest_neighbours, labels = k_nearest_neighbours(
            record, k, training_data, training_data_labels)
        if predict(record, nearest_neighbours, labels) == label:
            correct_classified = correct_classified + 1
            #print("S")
        else:
            incorrect_classified = incorrect_classified + 1
            #print("F")
    error = incorrect_classified / (correct_classified + incorrect_classified)
    error_matrix.append(error)
print(error_matrix)

import matplotlib.pyplot as plt

plt.plot(range(1, 30), error_matrix)  # error rate for each value of k
plt.show()
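This example relies on `k_nearest_neighbours` and `predict` helpers that are not shown. A plausible minimal sketch follows, assuming Euclidean distance and a simple majority vote; the distance metric and tie-breaking are assumptions, not part of the original code.

import math
from collections import Counter


def k_nearest_neighbours(record, k, training_data, training_data_labels):
    # Return the k closest training records and their labels (Euclidean distance).
    distances = []
    for row, label in zip(training_data, training_data_labels):
        dist = math.sqrt(sum((a - b) ** 2 for a, b in zip(record, row)))
        distances.append((dist, row, label))
    distances.sort(key=lambda item: item[0])
    neighbours = [row for _, row, _ in distances[:k]]
    labels = [label for _, _, label in distances[:k]]
    return neighbours, labels


def predict(record, nearest_neighbours, labels):
    # Classify by majority vote among the neighbours' labels; record and
    # nearest_neighbours are accepted only to match the call site above.
    return Counter(labels).most_common(1)[0][0]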
Example #8
train = pd.read_csv('data/Train.csv', parse_dates=['saledate'])
test = pd.read_csv('data/Test.csv', parse_dates=['saledate'])


X_train = create_X(train)
X_test = create_X(test)
y_train = create_y_train(train)

X_train_normalized, X_test_normalized = normalize_X(X_train, X_test)

model_linear = train_model(X_train, y_train, LinearRegression())
model_ridge = train_model(X_train_normalized, y_train, Ridge())
model_lasso = train_model(X_train_normalized, y_train,
                          Lasso(alpha=0.00005, max_iter=120000))

submit_linear = predict(model_linear, test, X_test, 'model_lin')
submit_ridge = predict(model_ridge, test, X_test_normalized, 'model_rid')
submit_lasso = predict(model_lasso, test, X_test_normalized, 'model_las')

y_test = pd.read_csv('data/do_not_open/test_soln.csv')

print('Linear: ', score(submit_linear, y_test),
      '; Ridge: ', score(submit_ridge, y_test),
      '; Lasso: ', score(submit_lasso, y_test))
# Linear:  0.40826129534246886 ; Ridge:  0.40822991882415727 ; Lasso:  0.40834486305959367
# Pick Ridge
Example #9
    type=str,
    help=
    "mapping of categories to real names file, default is cat_to_name.json",
    default='cat_to_name.json')
parser.add_argument('--gpu',
                    action="store_true",
                    default=False,
                    help='GPU mode for training, default is off')

results = parser.parse_args()

image_path = results.image_path
checkpoint = results.checkpoint
top_k = results.top_k
category_names = results.category_names
gpu = results.gpu

model = load_checkpoint(checkpoint)

with open(category_names, 'r') as f:
    cat_to_name = json.load(f)

probs, classes = predict(image_path, model, top_k, gpu)

names = [cat_to_name[str(i)] for i in classes]

print(probs)
print(classes)
print(names)
Example #10
                        help='image for transformation or viewing',
                        metavar='IMAGE_PATH')
    parser.add_argument('-o',
                        type=str,
                        dest='image_output_path',
                        help='image output path',
                        metavar='IMAGE_OUTPUT_PATH')
    parser.add_argument('--iters',
                        type=int,
                        dest='iters',
                        help='number of iterations, only for temp_view mode',
                        metavar='ITER_TIMES',
                        default=500)

    return parser


if __name__ == '__main__':
    parser = build_parser()
    args = parser.parse_args()

    with open(args.config_path) as f_config:
        options = json.load(f_config)

    if args.mode == 'train':
        train(options)
    elif args.mode == 'predict':
        predict(options, args.image_path, args.image_output_path)
    elif args.mode == 'temp_view':
        temp_view(options, args.image_path, args.image_output_path, args.iters)