def main(args):
    """Tune hyper-parameters on a train/valid split, with an extra
    uniformly-sampled training set (`unif_train`) passed through to the tuner.

    Expects `args` to carry: grid, path, dataset, train, unif_train, valid,
    name, gpu, seed, way.
    """
    progress = WorkSplitter()
    progress.section("Tune Parameters")

    params = load_yaml(args.grid)
    # Narrow the grid to the single requested model: name -> model callable.
    # Hoisted into a local to avoid looking up params['models'] three times.
    model_name = params['models']
    params['models'] = {model_name: models[model_name]}

    train = load_numpy(path=args.path, name=args.dataset + args.train)
    unif_train = load_numpy(path=args.path, name=args.dataset + args.unif_train)
    valid = load_numpy(path=args.path, name=args.dataset + args.valid)

    hyper_parameter_tuning(train, valid, params,
                           unif_train=unif_train,
                           save_path=args.dataset + args.name,
                           gpu_on=args.gpu,
                           seed=args.seed,
                           way=args.way,
                           dataset=args.dataset)
def main(args):
    """Tune hyper-parameters for a similarity-based model on a train/valid split.

    Expects `args` to carry: grid, path, train, valid, name, gpu.
    The similarity measure comes from the YAML grid (`params['similarity']`).
    """
    params = load_yaml(args.grid)
    # Narrow the grid to the single requested model: name -> model callable.
    # Hoisted into a local to avoid looking up params['models'] three times.
    model_name = params['models']
    params['models'] = {model_name: models[model_name]}

    R_train = load_numpy(path=args.path, name=args.train)
    R_valid = load_numpy(path=args.path, name=args.valid)

    hyper_parameter_tuning(R_train, R_valid, params,
                           save_path=args.name,
                           measure=params['similarity'],
                           gpu_on=args.gpu)
def main(args):
    """Tune hyper-parameters (or explanation parameters) on DataFrame inputs.

    Loads train/valid CSVs, keeps only positive-rating rows in train,
    parses the stringified keyphrase vectors, and dispatches to the
    explanation tuner or the regular tuner depending on `args.explanation`.

    Expects `args` to carry: parameters, data_dir, user_col, item_col,
    rating_col, keyphrase_vector_col, keyphrase_col, train_set, valid_set,
    keyphrase_set, explanation, save_path.
    """
    params = load_yaml(args.parameters)
    # Narrow the grid to the single requested model: name -> model callable.
    # Hoisted into a local to avoid looking up params['models'] three times.
    model_name = params['models']
    params['models'] = {model_name: models[model_name]}

    # User/item counts come from dedicated per-column CSVs named after the column.
    num_users = pd.read_csv(args.data_dir + args.user_col + '.csv')[args.user_col].nunique()
    num_items = pd.read_csv(args.data_dir + args.item_col + '.csv')[args.item_col].nunique()

    df_train = pd.read_csv(args.data_dir + args.train_set)
    # Keep only positive interactions for training.
    df_train = df_train[df_train[args.rating_col] == 1]
    # Keyphrase vectors are stored as stringified Python lists; parse them safely.
    df_train[args.keyphrase_vector_col] = df_train[
        args.keyphrase_vector_col].apply(ast.literal_eval)

    df_valid = pd.read_csv(args.data_dir + args.valid_set)

    keyphrase_names = pd.read_csv(args.data_dir + args.keyphrase_set)[
        args.keyphrase_col].values

    # Both tuners share the same positional signature; only the entry point differs.
    tuner = explanation_parameter_tuning if args.explanation else hyper_parameter_tuning
    tuner(num_users, num_items,
          args.user_col, args.item_col, args.rating_col,
          args.keyphrase_vector_col,
          df_train, df_valid,
          keyphrase_names, params,
          save_path=args.save_path)
def main(args):
    """Tune hyper-parameters with rating and keyphrase matrices.

    Loads train/valid rating matrices and train/valid keyphrase matrices,
    binarizes the keyphrase matrices in place, and hands everything to the
    tuner.

    Expects `args` to carry: parameters, data_dir, train_set, valid_set,
    train_keyphrase_set, valid_keyphrase_set, save_path, tune_explanation.
    """
    params = load_yaml(args.parameters)
    # Narrow the grid to the single requested model: name -> model callable.
    # Hoisted into a local to avoid looking up params['models'] three times.
    model_name = params['models']
    params['models'] = {model_name: models[model_name]}

    R_train = load_numpy(path=args.data_dir, name=args.train_set)
    R_valid = load_numpy(path=args.data_dir, name=args.valid_set)
    R_train_keyphrase = load_numpy(path=args.data_dir, name=args.train_keyphrase_set)
    R_valid_keyphrase = load_numpy(path=args.data_dir, name=args.valid_keyphrase_set)

    # Binarize keyphrase counts: any nonzero entry becomes 1.
    R_train_keyphrase[R_train_keyphrase != 0] = 1
    R_valid_keyphrase[R_valid_keyphrase != 0] = 1

    # NOTE(review): only the train keyphrase matrix is densified here while the
    # valid one stays sparse — presumably intentional on the tuner side; confirm.
    hyper_parameter_tuning(R_train, R_valid,
                           R_train_keyphrase.todense(),
                           R_valid_keyphrase,
                           params,
                           save_path=args.save_path,
                           tune_explanation=args.tune_explanation)
def main(args):
    """Tune hyper-parameters on a plain train/valid rating-matrix split.

    Expects `args` to carry: parameters, path, train, valid, save_path.
    """
    params = load_yaml(args.parameters)
    # Narrow the grid to the single requested model: name -> model callable.
    # Hoisted into a local to avoid looking up params['models'] three times.
    model_name = params['models']
    params['models'] = {model_name: models[model_name]}

    R_train = load_numpy(path=args.path, name=args.train)
    R_valid = load_numpy(path=args.path, name=args.valid)

    hyper_parameter_tuning(R_train, R_valid, params, save_path=args.save_path)