# This file was added for an extra experiment (CT3): evaluating models at different sizes.
from collections import defaultdict

import lib
from lib.recommenders import *
import pandas as pd

BASELINES = [DebiasedModel]
OUTFILE = "results/3_different_size.json"
INFILE = "results/2a_find_best.json"

dataset = lib.ImplicitMovieLens("ml-25m-implicit")
metrics = [
    lib.eval.SumOfRanks(),
    lib.eval.RecallAtK(100),
    lib.eval.RecallAtK(50),
    lib.eval.RecallAtK(25)
]

supervised_testset = dataset.load_rankings(
    "datasets/labeled/similarity_judgements.test.csv",
    "movieId",
    "neighborId",
    "sim_bin",
    verbose=True)

results = {}
table = []

prev_models = lib.from_json(INFILE)
for size in [20, 50, 70, 100, 200, 380]:
    # Per-size evaluation of the models selected in experiment 2a; a hedged
    # sketch of one possible body is given in size_sweep() below.
    pass
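
# The loop above is only a stub here: the per-size evaluation that should fill
# `results` and `table` is not part of this file. The function below is a
# hedged sketch of one way such a sweep could look, NOT the original
# experiment code: `build_model(config, size)` and `score(model, testset,
# metric)` are hypothetical callables standing in for whatever lib provides,
# and `prev_models` is assumed to map a model name to its best configuration
# from experiment 2a.
def size_sweep(sizes, build_model, score):
    """Sketch: score every re-built model at every size with every metric."""
    rows = []
    for size in sizes:
        for name, config in prev_models.items():
            model = build_model(config, size)  # hypothetical model builder
            for metric in metrics:
                rows.append({
                    "model": name,
                    "size": size,
                    "metric": type(metric).__name__,
                    "value": score(model, supervised_testset, metric),  # hypothetical scorer
                })
    return pd.DataFrame(rows)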


def get_valid_int(max_option):
    """Prompt until the user enters an integer in [0, max_option]; empty input returns None."""
    valid = [str(x) for x in range(max_option + 1)]
    while True:
        res = input("Input option (0-%d) [empty to exit]: " % max_option)
        if not res.strip():
            return None
        elif res in valid:
            return int(res)
        else:
            print("Invalid option. Try again.")


# Interactive lookup: search the MovieLens catalogue by (partial) title.
movielens = lib.ImplicitMovieLens("ml-25m-implicit")
similarity_judgements = movielens.load_rankings(
    "datasets/labeled/similarity_judgements.train.csv",
    "movieId",
    "neighborId",
    "sim_bin",
    verbose=False)

models = load_models(movielens, similarity_judgements)

while True:
    title = input("Input (partial) movie title [empty to quit]: ")
    if not title.strip():
        break

    results = search(movielens, title)