# Add the dr split to the training data and train the BPR-MF model.
train = pd.concat((train, dr))
users = ground.user.unique()
model = BPR_MF(n_iter=100, learning_rate=1e-1, sparse=False, batch_size=4096,
               n_jobs=8, use_cuda=False, optimizer_func=Adam, verbose=True,
               stopping=True, early_stopping=True)

# (user, item, rating) triples, shuffled before fitting.
interactions = train[['user', 'item', 'rating']].values
np.random.shuffle(interactions)
x = interactions[:, :-1]
y = interactions[:, -1]
model.fit(x, y, {'users': 0, 'items': 1}, n_users=n_users, n_items=n_items)

# Score every (user, item) pair in the catalogue and rank items per user.
x = cartesian2D(users.reshape(-1, 1), item_cat.reshape(-1, 1))
pred = model.predict(x).reshape(users.shape[0], item_cat.shape[0])
np.save("predictions.txt", pred)   # saved as binary .npy despite the name
arg = np.argsort(-pred, 1)         # items sorted by descending score
np.save("rankings.txt", arg)
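The cartesian2D helper used here and in the remaining scripts is not shown. It is assumed to pair every row of its first argument with every row of its second, so that each user is matched with every catalogue item, which is what the later reshape into a (users, items) matrix relies on. A minimal sketch under that assumption:

import numpy as np

def cartesian2D(a, b):
    # Assumed behaviour: row-wise Cartesian product of two 2-D arrays.
    # Output shape: (a.shape[0] * b.shape[0], a.shape[1] + b.shape[1]),
    # with the rows of `a` varying slowest, matching the reshape above.
    left = np.repeat(a, b.shape[0], axis=0)
    right = np.tile(b, (a.shape[0], 1))
    return np.hstack((left, right))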
# Train FM_MMR with an attached training logger, which records the loss per epoch.
logger = TLogger()
model = FM_MMR(n_iter=1, learning_rate=1e-1, logger=logger, n_jobs=4,
               use_cuda=False)

# Feature columns (user, item, artist) and the rating target.
x = interactions[:, :-1]
y = interactions[:, -1]
model.fit(x, y,
          dic={'users': 0, 'items': 1, 'artists': 2},
          n_users=n_users, n_items=n_items,
          lengths={"n_artists": n_artists})

# Plot the training loss recorded by the logger.
plt.plot(logger.epochs, logger.losses)
plt.show()

# Build a top-3 recommendation table for the first ten users.
users = np.unique(x[:, 0]).reshape(-1, 1)[:10, :]
items = train[['tps_id', 'artist_id', 'mode']].drop_duplicates('tps_id').values
values = cartesian2D(users, items)
top = 3
table = np.zeros((users.shape[0], top + 1), dtype=object)  # user id column plus top-3 items
table[:, 0] = users[:, 0]
table[:, 1:] = model.predict(values, top=top, b=1)
print(table)
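TLogger is likewise not shown; it is assumed to be a small callback object that the training loop fills with one (epoch, loss) pair per epoch, which is what the plot above reads back. A minimal sketch under that assumption (the log method name is hypothetical):

class TLogger:
    """Hypothetical minimal training logger: collects per-epoch losses."""

    def __init__(self):
        self.epochs = []
        self.losses = []

    def log(self, epoch, loss):
        # Assumed to be called once per epoch by the training loop.
        self.epochs.append(epoch)
        self.losses.append(loss)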
             n_jobs=N_JOBS, batch_size=BATCH_SIZE, n_factors=FACTORS,
             learning_rate=LEARNING_RATE, use_cuda=USE_CUDA, verbose=VERBOSE,
             sparse=SPARSE)

# Load the training and ground-truth splits and build the full user/item grid.
train = pd.read_csv(train_path, header=0)
ground = pd.read_csv(ground_path, header=0)
dataset = pd.concat((train, ground))
users = dataset.user.unique()
item_catalogue = dataset[proj[1:-1]].drop_duplicates().values
values = cartesian2D(users.reshape(-1, 1), item_catalogue)

# Build the FM model (pointing at MODEL_PATH) and score every (user, item) pair.
model0 = FM(n_iter=N_ITER, model=MODEL_PATH, n_jobs=N_JOBS,
            batch_size=BATCH_SIZE, n_factors=FACTORS,
            learning_rate=LEARNING_RATE, use_cuda=USE_CUDA,
            verbose=VERBOSE, sparse=SPARSE)
rank = model0.predict(values) \
             .reshape(users.shape[0], item_catalogue.shape[0])

# Loop over the diversity parameter b (progress bar labelled "sys.div.").
for b in tqdm([1.0], desc="sys.div.", leave=False):
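The body of the diversity loop is not shown. Leaving diversity aside, the plain relevance ranking can be read off the rank score matrix in the same way as in the other scripts of this section, by sorting each user's row in descending order:

import numpy as np

# Items for each user, ordered by descending predicted score
# (the same argsort step used in the other scripts).
ranking = np.argsort(-rank, axis=1)
top_k = 10                      # hypothetical cut-off
top_items = ranking[:, :top_k]  # indices into item_catalogue, per user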
# Create the ground truth: test interactions outside the dr split; dr itself
# is moved into the training data.
ground = test[~test.index.isin(dr.index)].dropna()
train = pd.concat((train, dr))
users = ground.user.unique()

# Train the FM model on (user, item, rating) triples.
model = FM(n_iter=100, learning_rate=1e-3, sparse=False, batch_size=4096,
           n_jobs=8, optimizer_func=Adam, use_cuda=True, verbose=True,
           stopping=True, early_stopping=True)
interactions = train[['user', 'item', 'rating']].values
np.random.shuffle(interactions)
x = interactions[:, :-1]
y = interactions[:, -1]
model.fit(x, y, {'users': 0, 'items': 1}, n_items=n_items, n_users=n_users)

# Score every (user, item) pair in the catalogue and rank items per user.
x = cartesian2D(users.reshape(-1, 1), item_cat.reshape(-1, 1))
pred = model.predict(x).reshape(users.shape[0], item_cat.shape[0])
np.save("predictions.txt", pred)
arg = np.argsort(-pred, 1)
np.save("rankings.txt", arg)
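Note that np.save writes binary NumPy files and appends a .npy suffix when the given name lacks one, so the files above end up on disk as predictions.txt.npy and rankings.txt.npy. A small sketch of loading them back, assuming those names:

import numpy as np

pred = np.load("predictions.txt.npy")   # (n_users, n_items) score matrix
arg = np.load("rankings.txt.npy")       # per-user item ranking, best first
print(pred.shape, arg.shape)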