def rs_recommend(proc_dir, model_path, item_list):
    # Load the item-id mapping produced during preprocessing.
    with open(path.join(proc_dir, 'x2i.pickle'), 'rb') as handle:
        x2i = pickle.load(handle)

    # Restore the trained autoencoder from the saved model file.
    model = DynamicAutoencoder()
    recoder = Recoder(model)
    recoder.init_from_model_file(model_path)

    # Turn the given items into an interaction vector and score all items.
    interactions = load_item_list(x2i, recoder.num_items, item_list)
    out = recoder.predict(interactions)

    # Persist the per-item scores next to the preprocessed data.
    with open(path.join(proc_dir, 'recommendations.pickle'), 'wb') as handle:
        pickle.dump(out[0].detach().squeeze(0).numpy(), handle)
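For reference, the imports the function above relies on together with a hypothetical call; the recoder.model and recoder.nn import paths are assumptions about the library layout, and all paths and item ids below are illustrative (load_item_list is the project helper used above, not redefined here).

import pickle
from os import path

from recoder.model import Recoder            # assumed import path
from recoder.nn import DynamicAutoencoder    # assumed import path

# Hypothetical call: directory, model file and item ids are examples only.
rs_recommend(proc_dir='data/processed',
             model_path='models/rs.model',
             item_list=[42, 7, 1001])

# Read back the scores written by the function (one value per known item).
with open(path.join('data/processed', 'recommendations.pickle'), 'rb') as handle:
    scores = pickle.load(handle)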
def load_models() -> Dict[str, Recoder]:
    model_paths = {}
    model_re = re.compile(r'^(?P<ds>.*)\.model$')
    for f in os.listdir(MODELS_DIR):
        match = model_re.match(f)
        if match:
            model_paths[match.group('ds')] = os.path.join(MODELS_DIR, f)

    recorders = {}
    for ds, path in model_paths.items():
        model = DynamicAutoencoder()
        recoder = Recoder(model)
        recoder.init_from_model_file(path)
        recorders[ds] = recoder

    return recorders
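A short usage sketch for load_models, assuming MODELS_DIR contains checkpoints such as movies.model and music.model; the dataset key below is illustrative.

# Hypothetical usage: keys are the *.model filenames found in MODELS_DIR.
recoders = load_models()
movie_recoder = recoders['movies']    # illustrative dataset name
print(movie_recoder.num_items)        # item count of the loaded model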
def test_model(sparse, exp_recall_20, exp_recall_50, exp_ndcg_100):
    data_dir = 'tests/data/'
    model_dir = '/tmp/'

    train_df = pd.read_csv(data_dir + 'train.csv')
    val_df = pd.read_csv(data_dir + 'val.csv')

    # keep the items that exist in the training dataset
    val_df = val_df[val_df.sid.isin(train_df.sid.unique())]

    train_matrix, item_id_map, user_id_map = dataframe_to_csr_matrix(
        train_df, user_col='uid', item_col='sid', inter_col='watched')
    val_matrix, _, _ = dataframe_to_csr_matrix(
        val_df, user_col='uid', item_col='sid', inter_col='watched',
        item_id_map=item_id_map, user_id_map=user_id_map)

    train_dataset = RecommendationDataset(train_matrix)
    val_dataset = RecommendationDataset(val_matrix, train_matrix)

    use_cuda = False

    model = DynamicAutoencoder(hidden_layers=[200], activation_type='tanh',
                               noise_prob=0.5, sparse=sparse)
    trainer = Recoder(model=model, use_cuda=use_cuda,
                      optimizer_type='adam', loss='logloss')

    trainer.train(train_dataset=train_dataset, val_dataset=val_dataset,
                  batch_size=500, lr=1e-3, weight_decay=2e-5,
                  num_epochs=30, negative_sampling=True)

    # assert model metrics
    recall_20 = Recall(k=20, normalize=True)
    recall_50 = Recall(k=50, normalize=True)
    ndcg_100 = NDCG(k=100)

    results = trainer._evaluate(eval_dataset=val_dataset,
                                num_recommendations=100,
                                metrics=[recall_20, recall_50, ndcg_100],
                                batch_size=500)
    for metric, value in list(results.items()):
        results[metric] = np.mean(results[metric])

    assert np.isclose(results[recall_20], exp_recall_20, atol=0.01, rtol=0)
    assert np.isclose(results[recall_50], exp_recall_50, atol=0.01, rtol=0)
    assert np.isclose(results[ndcg_100], exp_ndcg_100, atol=0.01, rtol=0)

    # Save the model and evaluate again
    model_checkpoint = model_dir + 'test_model.model'
    state_file = trainer.save_state(model_checkpoint)

    model = DynamicAutoencoder(sparse=sparse)
    trainer = Recoder(model=model, use_cuda=use_cuda,
                      optimizer_type='adam', loss='logloss')
    trainer.init_from_model_file(state_file)

    results = trainer._evaluate(eval_dataset=val_dataset,
                                num_recommendations=100,
                                metrics=[recall_20, recall_50, ndcg_100],
                                batch_size=500)
    for metric, value in list(results.items()):
        results[metric] = np.mean(results[metric])

    assert np.isclose(results[recall_20], exp_recall_20, atol=0.01, rtol=0)
    assert np.isclose(results[recall_50], exp_recall_50, atol=0.01, rtol=0)
    assert np.isclose(results[ndcg_100], exp_ndcg_100, atol=0.01, rtol=0)

    os.remove(state_file)
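For context, a toy sketch of the dataframe layout this test consumes and of what dataframe_to_csr_matrix returns; the values are made up and the recoder.utils import path is an assumption about the library layout.

import pandas as pd
from recoder.utils import dataframe_to_csr_matrix   # assumed import path

# Toy implicit-feedback interactions: two users, two items.
toy_df = pd.DataFrame({'uid': [0, 0, 1],
                       'sid': [10, 11, 10],
                       'watched': [1, 1, 1]})

# Returns a users-by-items sparse matrix plus the id maps; the test reuses
# the maps on val_df so both matrices share the same item columns.
matrix, item_id_map, user_id_map = dataframe_to_csr_matrix(
    toy_df, user_col='uid', item_col='sid', inter_col='watched')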
common_params = {
    'user_col': 'uid',
    'item_col': 'sid',
    'inter_col': 'watched',
}

method = 'inference'
model_file = model_dir + 'bce_ns_d_0.0_n_0.5_200_epoch_100.model'
index_file = model_dir + 'bce_ns_d_0.0_n_0.5_200_epoch_100.model.index'
num_recommendations = 100

if method == 'inference':
    model = DynamicAutoencoder()
    recoder = Recoder(model)
    recoder.init_from_model_file(model_file)
    recommender = InferenceRecommender(recoder, num_recommendations)
elif method == 'similarity':
    embeddings_index = AnnoyEmbeddingsIndex()
    embeddings_index.load(index_file=index_file)
    cache_embeddings_index = MemCacheEmbeddingsIndex(embeddings_index)
    recommender = SimilarityRecommender(cache_embeddings_index,
                                        num_recommendations, scale=1, n=50)

train_df = pd.read_csv(data_dir + 'train.csv')
val_te_df = pd.read_csv(data_dir + 'test_te.csv')
val_tr_df = pd.read_csv(data_dir + 'test_tr.csv')
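The fragment above stops after loading the dataframes; sketched below, by analogy with the other snippets in this section, is the usual next step of converting them to CSR matrices that share one item id map. The recoder.utils import path is an assumption and the variable names simply mirror the later snippets.

from recoder.utils import dataframe_to_csr_matrix   # assumed import path

# Sketch only: build matrices with a shared item id map, reusing common_params.
train_matrix, item_id_map, _ = dataframe_to_csr_matrix(train_df, **common_params)
val_tr_matrix, _, user_id_map = dataframe_to_csr_matrix(
    val_tr_df, item_id_map=item_id_map, **common_params)
val_te_matrix, _, _ = dataframe_to_csr_matrix(
    val_te_df, item_id_map=item_id_map, user_id_map=user_id_map, **common_params)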
common_params = {
    'user_col': 'uid',
    'item_col': 'sid',
    'inter_col': 'listen',
}

method = 'inference'
model_file = model_dir + 'bce_ns_d_0.0_n_0.5_200_epoch_80.model'
index_file = model_dir + 'bce_ns_d_0.0_n_0.5_200_epoch_80.model.index'
num_recommendations = 100

if method == 'inference':
    model = DynamicAutoencoder()
    recoder = Recoder(model, use_cuda=True)
    recoder.init_from_model_file(model_file)
    recommender = InferenceRecommender(recoder, num_recommendations)
elif method == 'similarity':
    embeddings_index = AnnoyEmbeddingsIndex()
    embeddings_index.load(index_file=index_file)
    cache_embeddings_index = MemCacheEmbeddingsIndex(embeddings_index)
    recommender = SimilarityRecommender(cache_embeddings_index,
                                        num_recommendations, scale=1, n=50)

train_df = pd.read_csv(data_dir + 'train.csv')
val_te_df = pd.read_csv(data_dir + 'test_te.csv')
val_tr_df = pd.read_csv(data_dir + 'test_tr.csv')
train_dataset = RecommendationDataset(train_matrix)
val_tr_dataset = RecommendationDataset(val_tr_matrix, val_te_matrix)

use_cuda = True

model = DynamicAutoencoder(hidden_layers=[200], activation_type='tanh',
                           noise_prob=0.5, sparse=False)
# NOTE(keshav2): Don't remove in case we want to try a different model
# model = MatrixFactorization(embedding_size=200, activation_type='tanh',
#                             dropout_prob=0.5, sparse=False)

trainer = Recoder(model=model, use_cuda=use_cuda, optimizer_type='adam',
                  loss='logistic', user_based=False,
                  gavel_dir=(args.checkpoint_dir
                             if args.enable_gavel_iterator else None))

metrics = [Recall(k=20, normalize=True), Recall(k=50, normalize=True),
           NDCG(k=100)]

try:
    trainer.train(args.local_rank, train_dataset=train_dataset,
                  val_dataset=val_tr_dataset, batch_size=args.batch_size,
                  lr=1e-3, weight_decay=2e-5, num_epochs=args.num_epochs,
                  negative_sampling=True,
                  lr_milestones=None,  # [60, 80]
                  num_data_workers=0,
                  model_checkpoint_prefix=checkpoint_path,
                  checkpoint_freq=0, eval_num_recommendations=0,
                  metrics=metrics, eval_freq=0)
val_te_dataset.fill_from_dataframe(dataframe=val_te_df, **common_params)
val_tr_dataset.fill_from_dataframe(dataframe=val_tr_df, **common_params)

use_cuda = True

model = DynamicAutoencoder(hidden_layers=[200], activation_type='tanh',
                           noise_prob=0.5, sparse=True)
# model = MatrixFactorization(embedding_size=200, activation_type='tanh',
#                             dropout_prob=0.5, sparse=False)

trainer = Recoder(model=model, use_cuda=use_cuda, optimizer_type='adam',
                  loss='logistic', user_based=False, index_ids=False)

# trainer.init_from_model_file(model_dir + 'bce_ns_d_0.0_n_0.5_200_epoch_50.model')

model_checkpoint = model_dir + 'bce_ns_d_0.0_n_0.5_200'

metrics = [
    Recall(k=20, normalize=True),
    Recall(k=50, normalize=True),
    NDCG(k=100)
]

try:
    trainer.train(train_dataset=train_dataset, val_dataset=val_tr_dataset,
def train_rs(proc_dir: str, model_dir: str, model_name: str, lr: float,
             lr_milestones: List[int], wd: float, epochs: int, emb_size: int,
             batch_size: int, valid_users_pct: float, valid_items_pct: float,
             wo_eval: bool):
    print('Reading data...')
    ds = pd.read_csv(path.join(proc_dir, 'ds.csv'))
    ds['inter'] = 1
    # Identity item-id map: keep the raw item ids as matrix column indices.
    item_identity = {i: i for i in ds['item']}

    if wo_eval:
        train = ds
    else:
        print('Train test split...')
        train, valid = train_test_split(ds, valid_users_pct)
        valid_t, valid_e = train_eval_split(valid, valid_items_pct)
        del valid
    del ds

    print('Making sparse matrices...')
    common_params = {
        'user_col': 'user',
        'item_col': 'item',
        'inter_col': 'inter',
    }
    train_matrix, _, _ = dataframe_to_csr_matrix(train, item_id_map=item_identity,
                                                 **common_params)
    train_dataset = RecommendationDataset(train_matrix)
    del train

    if wo_eval:
        valid_dataset = None
    else:
        # noinspection PyUnboundLocalVariable
        val_t_matrix, _, user_id_map = dataframe_to_csr_matrix(
            valid_t, item_id_map=item_identity, **common_params)
        # noinspection PyUnboundLocalVariable
        val_e_matrix, _, _ = dataframe_to_csr_matrix(
            valid_e, item_id_map=item_identity, user_id_map=user_id_map,
            **common_params)
        valid_dataset = RecommendationDataset(val_t_matrix, val_e_matrix)
        del valid_t, valid_e

    use_cuda = True

    print('Training model...')
    model = DynamicAutoencoder(hidden_layers=[emb_size], activation_type='tanh',
                               noise_prob=0.5, sparse=False)
    trainer = Recoder(model=model, use_cuda=use_cuda, optimizer_type='adam',
                      loss='logistic', user_based=False)

    metrics = [
        Recall(k=20, normalize=True),
        Recall(k=50, normalize=True),
        NDCG(k=100)
    ]

    model_prefix = path.join(model_dir, model_name)
    eval_num_recs = 100

    trainer.train(train_dataset=train_dataset, val_dataset=valid_dataset,
                  batch_size=batch_size, lr=lr, weight_decay=wd,
                  num_epochs=epochs, negative_sampling=True,
                  lr_milestones=lr_milestones, num_data_workers=mp.cpu_count(),
                  model_checkpoint_prefix=model_prefix, checkpoint_freq=0,
                  eval_num_recommendations=eval_num_recs, metrics=metrics,
                  eval_freq=5)

    # The trainer writes the checkpoint with an epoch suffix; normalize the name.
    actual_path = "{}_epoch_{}.model".format(model_prefix, epochs)
    shutil.move(actual_path, model_prefix + '.model')

    # Final evaluation only applies when a validation split was built.
    if not wo_eval:
        results = trainer._evaluate(valid_dataset, eval_num_recs, metrics,
                                    batch_size)
        with open(model_prefix + '_metrics.json', 'w') as f:
            json.dump({str(metric): np.mean(results[metric])
                       for metric in metrics}, f)
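A hypothetical invocation of train_rs; every path and hyperparameter below is an example value, not something prescribed by the function.

# Example call (all values illustrative).
train_rs(proc_dir='data/processed',
         model_dir='models',
         model_name='rs',
         lr=1e-3,
         lr_milestones=[60, 80],
         wd=2e-5,
         epochs=100,
         emb_size=200,
         batch_size=500,
         valid_users_pct=0.1,
         valid_items_pct=0.2,
         wo_eval=False)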