def test01_most_similar(self):
    set_log_level(2)
    data_opt = self.get_ml100k_mm_opt()
    opt = ALSOption().get_default_option()
    opt.d = 20
    opt.num_workers = 1
    als = ALS(opt, data_opt=data_opt)
    als.initialize()
    als.train()
    pals = ParALS(als)
    random_keys = [k for k, _ in als.most_similar('49.Star_Wars_(1977)', topk=128)]
    random_indexes = als.get_index_pool(random_keys)
    naive = [als.most_similar(k, topk=10) for k in random_keys]
    topks0 = [[k for k, _ in result] for result in naive]
    scores0 = np.array([[v for _, v in result] for result in naive])
    self.assertEqual(scores0.shape, (128, 10), msg='check even size')
    scores0 = scores0.reshape(len(naive), 10)
    pals.num_workers = 1
    topks1, scores1 = pals.most_similar(random_keys, topk=10, repr=True)
    topks2, scores2 = pals.most_similar(random_indexes, topk=10, repr=True)
    for a, b in combinations([topks0, topks1, topks2], 2):
        self.assertEqual(a, b)
    for a, b in combinations([scores0, scores1, scores2], 2):
        self.assertTrue(np.allclose(a, b))
def test05_topk_MT(self):
    set_log_level(2)
    data_opt = self.get_ml100k_mm_opt()
    opt = ALSOption().get_default_option()
    opt.d = 20
    opt.num_workers = 1
    als = ALS(opt, data_opt=data_opt)
    als.initialize()
    als.train()
    als.build_userid_map()
    all_keys = als._idmanager.userids
    start_t = time.time()
    naive = als.topk_recommendation(all_keys, topk=5)
    naive_elapsed = time.time() - start_t
    pals = ParALS(als)
    pals.num_workers = 4
    start_t = time.time()
    qkeys1, topks1, scores1 = pals.topk_recommendation(all_keys, topk=5, repr=True)
    par_elapsed = time.time() - start_t
    self.assertEqual(len(qkeys1), len(naive))
    for q, t in zip(qkeys1, topks1):
        self.assertEqual(naive[q], t)
    self.assertTrue(naive_elapsed > par_elapsed * 1.5)
def als(self, database, **kwargs):
    from buffalo.algo.als import ALS
    opts = self.get_option('buffalo', 'als', **kwargs)
    data_opt = self.get_database(database, **kwargs)
    als = ALS(opts, data_opt=data_opt)
    als.initialize()
    if kwargs.get('return_instance_before_train'):
        return als
    elapsed, mem_info = self.run(als.train)
    als = None
    return elapsed, mem_info
def example2():
    log.set_log_level(log.INFO)
    als_option = ALSOption().get_default_option()
    data_option = MatrixMarketOptions().get_default_option()
    data_option.input.main = '../tests/ext/ml-20m/main'
    data_option.input.iid = '../tests/ext/ml-20m/iid'
    data_option.data.path = './ml20m.h5py'
    data_option.data.use_cache = True

    als = ALS(als_option, data_opt=data_option)
    als.initialize()
    als.train()
    als.normalize('item')
    als.build_itemid_map()

    print('Make item recommendations on als.ml20m.par.top10.tsv with Parallel (Thread=4)')
    par = ParALS(als)
    par.num_workers = 4
    all_items = als._idmanager.itemids
    start_t = time.time()
    with open('als.ml20m.par.top10.tsv', 'w') as fout:
        for idx in range(0, len(all_items), 128):
            topks, _ = par.most_similar(all_items[idx:idx + 128], repr=True)
            for q, p in zip(all_items[idx:idx + 128], topks):
                fout.write('%s\t%s\n' % (q, '\t'.join(p)))
    print('took: %.3f secs' % (time.time() - start_t))

    # Build an approximate nearest neighbor index over the item factors with n2.
    from n2 import HnswIndex
    index = HnswIndex(als.Q.shape[1])
    for f in als.Q:
        index.add_data(f)
    index.build(n_threads=4)
    index.save('ml20m.n2.index')
    index.unload()

    print('Make item recommendations on als.ml20m.ann.top10.tsv with ANN (Thread=4)')
    par.set_hnsw_index('ml20m.n2.index', 'item')
    par.num_workers = 4
    start_t = time.time()
    with open('als.ml20m.ann.top10.tsv', 'w') as fout:
        for idx in range(0, len(all_items), 128):
            topks, _ = par.most_similar(all_items[idx:idx + 128], repr=True)
            for q, p in zip(all_items[idx:idx + 128], topks):
                fout.write('%s\t%s\n' % (q, '\t'.join(p)))
    print('took: %.3f secs' % (time.time() - start_t))
def test02_most_similar(self):
    num_cpu = psutil.cpu_count()
    if num_cpu < 2:
        return
    set_log_level(1)
    als = ALS()
    mp = MockParallel(als)
    R = 1000000
    Q = self.get_factors(R, 12)
    indexes = np.random.choice(range(R), 1024).astype(np.int32)
    pool = np.array([], dtype=np.int32)
    elapsed = []
    results = []
    for num_workers in [1] + [i * 2 for i in range(1, num_cpu + 1) if i * 2 < num_cpu][:3]:
        mp.num_workers = num_workers
        start_t = time.time()
        ret = mp._most_similar('item', indexes, Q, 10, pool, -1, True)
        elapsed.append(time.time() - start_t)
        results.append(ret)
    for i in range(1, len(elapsed)):
        self.assertTrue(elapsed[i - 1] > elapsed[i] * 1.2)
        self.assertTrue(np.allclose(results[i - 1][0], results[i][0], atol=1e-07))
        self.assertTrue(np.allclose(results[i - 1][1], results[i][1], atol=1e-07))
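# The `get_factors` fixture used by these MockParallel tests is defined
# elsewhere in the test class. A minimal sketch is below, assuming it simply
# draws random dense float32 factors; whether the real helper L2-normalizes
# rows or fixes a random seed is not visible here, so the name and details
# are illustrative only (numpy assumed imported as np, as elsewhere).
def get_factors_sketch(num_rows, num_dims):
    # Random Gaussian factors, shaped (num_rows, num_dims), in float32 to
    # match the dtype typically expected by the parallel kernels.
    return np.random.normal(size=(num_rows, num_dims)).astype(np.float32)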
def test03_pool(self):
    set_log_level(1)
    als = ALS()
    mp = MockParallel(als)
    Q = self.get_factors(128, 5)
    indexes = np.array([0, 1, 2, 3, 4], dtype=np.int32)
    pool = np.array([5, 6, 7], dtype=np.int32)
    topks, scores = mp._most_similar(indexes, Q, 10, pool)
    # With a 3-item pool and topk=10, every result row should contain only
    # the pooled indexes plus -1 padding.
    self.assertEqual(set(topks.reshape(10 * 5)), {5, 6, 7, -1})
def test01_most_similar(self):
    set_log_level(1)
    als = ALS()
    mp = MockParallel(als)
    Q = self.get_factors(128, 5)
    indexes = np.array([0, 1, 2, 3, 4], dtype=np.int32)
    pool = np.array([], dtype=np.int32)
    topks1, scores1 = mp._most_similar(indexes, Q, 10, pool)
    topks2, scores2 = self.get_most_similar(indexes, Q, 10)
    self.assertTrue(np.allclose(topks1, topks2))
    self.assertTrue(np.allclose(scores1, scores2))
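# The `get_most_similar` reference helper compared against above is not shown
# in this snippet. A brute-force sketch of what it could look like follows,
# assuming plain dot-product similarity over the factor matrix sorted in
# descending order; the actual helper may differ (e.g. in how it handles
# self-matches or ties), and the function name is illustrative.
def get_most_similar_sketch(indexes, Q, topk):
    scores = Q[indexes] @ Q.T                      # (num_queries, num_items)
    topks = np.argsort(-scores, axis=1)[:, :topk]  # best `topk` per query
    return topks, np.take_along_axis(scores, topks, axis=1)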
def test04_topk(self):
    set_log_level(1)
    als = ALS()
    mp = MockParallel(als)
    P = self.get_factors(512, 5)
    Q = self.get_factors(128, 5)
    q_indexes = np.array([312, 313, 314, 315, 316], dtype=np.int32)
    pool = np.array([], dtype=np.int32)
    topks1, scores1 = mp._topk_recommendation(q_indexes, P, Q, 10, pool)
    topks2, scores2 = self.get_topk(q_indexes, P, Q, 10)
    self.assertTrue(np.allclose(topks1, topks2))
    self.assertTrue(np.allclose(scores1, scores2))
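# `get_topk` is another reference helper that is not included here. A minimal
# sketch under the same assumption (user-item scores as the dot product of
# P and Q, sorted descending) might look like this; the name and exact return
# format are illustrative, not the library's definition.
def get_topk_sketch(q_indexes, P, Q, topk):
    scores = P[q_indexes] @ Q.T                    # (num_queries, num_items)
    topks = np.argsort(-scores, axis=1)[:, :topk]  # top items per query user
    return topks, np.take_along_axis(scores, topks, axis=1)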
def test06_topk_pool(self):
    set_log_level(2)
    data_opt = self.get_ml100k_mm_opt()
    opt = ALSOption().get_default_option()
    opt.d = 20
    opt.num_workers = 1
    als = ALS(opt, data_opt=data_opt)
    als.initialize()
    als.train()
    pals = ParALS(als)
    pool = np.arange(5, dtype=np.int32)
    als.build_userid_map()
    all_keys = als._idmanager.userids[:10]
    naive = als.topk_recommendation(all_keys, topk=10, pool=pool)
    qkeys1, topks1, scores1 = pals.topk_recommendation(all_keys, topk=10, pool=pool, repr=True)
    for q, t in zip(qkeys1, topks1):
        self.assertEqual(naive[q], t)
def test02_most_similar(self):
    set_log_level(1)
    data_opt = self.get_ml100k_mm_opt()
    opt = ALSOption().get_default_option()
    opt.d = 20
    opt.num_workers = 1
    als = ALS(opt, data_opt=data_opt)
    als.initialize()
    als.train()
    als.build_itemid_map()
    pals = ParALS(als)
    all_keys = als._idmanager.itemids[::]
    start_t = time.time()
    [als.most_similar(k, topk=10) for k in all_keys]
    naive_elapsed = time.time() - start_t
    pals.num_workers = 4
    start_t = time.time()
    pals.most_similar(all_keys, topk=10, repr=True)
    parals_elapsed = time.time() - start_t
    self.assertTrue(naive_elapsed > parals_elapsed * 3.0)
def test2_most_similar(self):
    set_log_level(2)
    opt = ALSOption().get_default_option()
    data_opt = MatrixMarketOptions().get_default_option()
    data_opt.input.main = self.ml_100k + 'main'
    data_opt.input.uid = self.ml_100k + 'uid'
    data_opt.input.iid = self.ml_100k + 'iid'
    als = ALS(opt, data_opt=data_opt)
    als.initialize()
    als.train()
    q1 = '49.Star_Wars_(1977)'
    q2 = '180.Return_of_the_Jedi_(1983)'
    q3 = '171.Empire_Strikes_Back,_The_(1980)'
    self._test_most_similar(als, q1, q2, q3)
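# `_test_most_similar` is a shared helper defined elsewhere in the test class.
# A plausible sketch is given below, assuming it checks that the two related
# titles (q2, q3) appear among the nearest neighbors of the query q1; the
# method name suffix and exact assertions are hypothetical.
def _test_most_similar_sketch(self, model, q1, q2, q3):
    # `most_similar` returns (item_key, score) pairs, as used in the tests above.
    similar_keys = [k for k, _ in model.most_similar(q1, topk=10)]
    self.assertIn(q2, similar_keys)
    self.assertIn(q3, similar_keys)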
def als(self, database, **kwargs):
    from pyspark.sql import SparkSession
    from pyspark.ml.recommendation import ALS
    from pyspark import SparkConf, SparkContext
    opts = self.get_option('pyspark', 'als', **kwargs)
    conf = SparkConf()\
        .setAppName('pyspark')\
        .setMaster('local[%s]' % kwargs.get('num_workers'))\
        .set('spark.local.dir', './tmp/')\
        .set('spark.worker.cleanup.enabled', 'true')\
        .set('spark.driver.memory', '32G')
    context = SparkContext(conf=conf)
    context.setLogLevel('WARN')
    spark = SparkSession(context)
    data = self.get_database(database, spark=spark, context=context)
    print(opts)
    als = ALS(**opts)
    elapsed, memory_usage = self.run(als.fit, data)
    spark.stop()
    return elapsed, memory_usage
def test02_most_similar(self):
    set_log_level(1)
    als = ALS()
    mp = MockParallel(als)
    R = 1000000
    Q = self.get_factors(R, 12)
    indexes = np.random.choice(range(R), 1024).astype(np.int32)
    pool = np.array([], dtype=np.int32)
    elapsed = []
    results = []
    for num_workers in [1, 2, 4, 8]:
        mp.num_workers = num_workers
        start_t = time.time()
        ret = mp._most_similar(indexes, Q, 10, pool)
        elapsed.append(time.time() - start_t)
        results.append(ret)
    for i in range(1, len(elapsed)):
        self.assertTrue(elapsed[i - 1] > elapsed[i] * 1.5)
        self.assertTrue(np.allclose(results[i - 1][0], results[i][0]))
        self.assertTrue(np.allclose(results[i - 1][1], results[i][1]))
def test00_tensorboard(self):
    set_log_level(2)
    opt = ALSOption().get_default_option()
    opt.d = 5
    opt.validation = aux.Option({'topk': 10})
    opt.tensorboard = aux.Option({'root': './tb', 'name': 'als'})
    data_opt = MatrixMarketOptions().get_default_option()
    data_opt.input.main = self.ml_100k + 'main'
    data_opt.input.uid = self.ml_100k + 'uid'
    data_opt.input.iid = self.ml_100k + 'iid'
    data_opt.data.value_prepro = aux.Option({'name': 'OneBased'})
    als = ALS(opt, data_opt=data_opt)
    als.initialize()
    als.train()
    results = als.get_validation_results()
    self.assertTrue(results['ndcg'] > 0.025)
    self.assertTrue(results['map'] > 0.015)
def example1():
    log.set_log_level(log.DEBUG)
    als_option = ALSOption().get_default_option()
    als_option.validation = aux.Option({'topk': 10})
    data_option = MatrixMarketOptions().get_default_option()
    data_option.input.main = '../tests/ext/ml-100k/main'
    data_option.input.iid = '../tests/ext/ml-100k/iid'

    als = ALS(als_option, data_opt=data_option)
    als.initialize()
    als.train()
    print('MovieLens 100k metrics for validations\n%s' %
          json.dumps(als.get_validation_results(), indent=2))

    print('Similar movies to Star_Wars_(1977)')
    for rank, (movie_name, score) in enumerate(als.most_similar('49.Star_Wars_(1977)')):
        print(f'{rank + 1:02d}. {score:.3f} {movie_name}')

    print('Run hyper parameter optimization for val_ndcg...')
    als.opt.num_workers = 4
    als.opt.evaluation_period = 10
    als.opt.optimize = aux.Option({
        'loss': 'val_ndcg',
        'max_trials': 100,
        'deployment': True,
        'start_with_default_parameters': True,
        'space': {
            'd': ['randint', ['d', 10, 128]],
            'reg_u': ['uniform', ['reg_u', 0.1, 1.0]],
            'reg_i': ['uniform', ['reg_i', 0.1, 1.0]],
            'alpha': ['randint', ['alpha', 1, 10]],
        }
    })
    log.set_log_level(log.INFO)
    als.opt.model_path = './example1.ml100k.als.optimize.bin'
    print(json.dumps({
        'alpha': als.opt.alpha,
        'd': als.opt.d,
        'reg_u': als.opt.reg_u,
        'reg_i': als.opt.reg_i
    }, indent=2))
    als.optimize()
    als.load('./example1.ml100k.als.optimize.bin')

    print('Similar movies to Star_Wars_(1977)')
    for rank, (movie_name, score) in enumerate(als.most_similar('49.Star_Wars_(1977)')):
        print(f'{rank + 1:02d}. {score:.3f} {movie_name}')

    optimization_res = als.get_optimization_data()
    best_parameters = optimization_res['best_parameters']
    print(json.dumps(optimization_res['best'], indent=2))
    print(json.dumps({
        'alpha': best_parameters['alpha'],
        'd': best_parameters['d'],
        'reg_u': best_parameters['reg_u'],
        'reg_i': best_parameters['reg_i']
    }, indent=2))
def test04_optimize(self):
    set_log_level(2)
    opt = ALSOption().get_default_option()
    opt.d = 5
    opt.num_workers = 2
    opt.model_path = 'als.bin'
    opt.validation = aux.Option({'topk': 10})
    optimize_option = aux.Option({
        'loss': 'val_rmse',
        'max_trials': 10,
        'deployment': True,
        'start_with_default_parameters': True,
        'space': {
            'd': ['randint', ['d', 10, 20]],
            'reg_u': ['uniform', ['reg_u', 0.1, 0.3]],
            'reg_i': ['uniform', ['reg_i', 0.1, 0.3]],
            'alpha': ['randint', ['alpha', 8, 10]]
        }
    })
    opt.optimize = optimize_option
    opt.evaluation_period = 1
    opt.tensorboard = aux.Option({'root': './tb', 'name': 'als'})
    data_opt = MatrixMarketOptions().get_default_option()
    data_opt.input.main = self.ml_100k + 'main'
    data_opt.input.uid = self.ml_100k + 'uid'
    data_opt.input.iid = self.ml_100k + 'iid'
    data_opt.data.value_prepro = aux.Option({'name': 'OneBased'})
    als = ALS(opt, data_opt=data_opt)
    als.init_factors()
    als.train()
    default_result = als.get_validation_results()
    als.optimize()
    base_loss = default_result['rmse']  # val_rmse
    optimize_loss = als.get_optimization_data()['best']['val_rmse']
    self.assertTrue(base_loss > optimize_loss)
    als.load('als.bin')
    loss = als.get_validation_results()
    self.assertAlmostEqual(loss['rmse'], optimize_loss)
    os.remove('als.bin')
def test2_init_with_dict(self):
    set_log_level(3)
    opt = ALSOption().get_default_option()
    ALS(opt)
    self.assertTrue(True)
app = Flask(__name__)

users = pd.read_json('./model/users.json', typ='frame')
books = pd.read_json('./model/books.json', typ='frame')
# books = pd.read_excel('./model/books.xlsx')
books['ISBN'] = books['ISBN'].astype(str)
books['권'] = books['권'].fillna('')

userbook_map = hp.get_userbook_map(users)
user_items, uid_to_idx, idx_to_uid, mid_to_idx, idx_to_mid = hp.df_to_matrix(
    userbook_map, 'user_id', 'book_id')
iid = list(idx_to_mid.values())
uid = list(idx_to_uid.values())

# model = ALS(opt, data_opt=data_opt)
model = ALS()
model.load('./model/als.optimize.bin')

headers = {
    'X-Naver-Client-Id': '',
    'X-Naver-Client-Secret': '',
}


def get_bookimage(params):
    response = requests.get('https://openapi.naver.com/v1/search/book_adv.xml',
                            headers=headers, params=params)
    root = ElementTree.fromstring(response.text)
    img_url = ''
def test00_init(self):
    set_log_level(1)
    als = ALS()
    MockParallel(als)
    self.assertTrue(True)