def __init__(self, path, model_c, model_cn, model_s, model_sn):
    """Build one Recommender per embedding-model variant.

    The order of self.recs matches self.models below:
    cbow, cbow-negative, skipgram, skipgram-negative.
    """
    self.recs = [
        Recommender(path, model)
        for model in (model_c, model_cn, model_s, model_sn)
    ]
    self.models = ["cbow", "cbow-negative", "skipgram", "skipgram-negative"]
    self.test_scenarios = []
def main():
    """Interactive restaurant finder: filter by location, rating and category,
    then either pick a random restaurant or list all matches."""
    # test
    all_data_df, time_data_df, category_data_df = parser.get_business_data()

    print("Enter a state, no abbreviations")  # fix this
    state = input()
    print("Enter a city")
    city = input()
    print("Enter a lower and upper bound for a desired rating of the restaurant, in this format: lower_bound,upper_bound")
    bounds = input().split(",")
    print("Enter food categories, split with commas, no space.")
    categories = input().split(",")

    # Narrow the business table down to the requested slice.
    all_data_df = select_location(all_data_df, state, city)
    all_data_df = select_range(all_data_df, float(bounds[0]), float(bounds[1]))
    place_ids = get_place_ids(all_data_df, category_data_df, categories)

    print("Enter 0 for a random restaurant, and 1 for a list of restaurants")
    choice = input()
    if int(choice) == 0:
        select_ran_res(all_data_df, list(place_ids), time_data_df)
        print("See recommendation? <Y/N>")
        recommendation = input()
        if recommendation == "Y":
            recommender = Recommender(all_data_df, list(place_ids))
            recommender.print_random()
    else:
        show_all_res(all_data_df, list(place_ids))
def setUp(self):
    """Wire the fixture to a Mongo-backed data source and a Recommender client."""
    super(TestRecommender, self).setUp()
    source = MongoDataSource(MONGO_URL, FEATURE_FILE)
    source.save_bounds()
    self.data_source = source
    self.client = Recommender(SERVER_URL, source)
def main():
    """Sweep num_features for the recommender and append scores to log.csv.

    (An earlier regularization sweep over reg values was explored and retired;
    the current run varies only the latent-feature count.)
    """
    R = utils.load_from_file('data/R.bin').astype(float)
    Y = utils.load_from_file('data/Y.bin')
    model = Recommender(Y=Y, R=R)

    for n_feat in [45, 50]:
        print("::: Trying num_feature = {}".format(n_feat))
        model.learn(verbose=True, reg=DEFAULT_REG, num_features=n_feat,
                    maxiter=DEFAULT_MAX_ITER)
        rmse = model.rmse()
        mae = model.mae()
        # Append one row per configuration so runs accumulate in the log.
        with open("log.csv", "a", newline='') as csvfile:
            csv.writer(csvfile).writerow([n_feat, DEFAULT_REG, rmse, mae])
def __init__(self):
    """Set up config, recommender backend, and the on-disk requests directory."""
    logging.info("Setting up AppRecommender...")
    cfg = Config()
    self.cfg = cfg
    self.rec = Recommender(cfg)
    requests_dir = "/var/www/AppRecommender/src/web/requests/"
    self.requests_dir = requests_dir
    # Create the directory on first run.
    if not os.path.exists(requests_dir):
        os.makedirs(requests_dir)
def results():
    """Render the results page from the session's personal ratings."""
    # An empty personal_ratings list cannot drive the recommender; bail to 404.
    if check_personal_ratings(session["personal_ratings"]):
        return redirect(url_for('to404'))
    try:
        engine = Recommender(session["personal_ratings"])
        top_results = engine.get_result()
        # The first result is featured separately on the page.
        first = top_results.pop(0)
        # release_date is "YYYY-MM-DD..."; keep just the year.
        first_year = first["release_date"][0:4]
        return render_template('results.html',
                               title="Results",
                               first_result=first,
                               first_result_year=first_year,
                               results=top_results)
    except Exception as e:
        print(e)
        return redirect(url_for('to404'))
def recommender():
    '''Creates user profile and recommends job'''
    # Each skillN flag is True when the matching checkbox was submitted in the
    # form (request.form.get returns None for absent keys).
    # NOTE(review): `is not None` would be the idiomatic comparison here, and
    # the 17 lines could be a loop — left byte-identical in this doc pass.
    skill1 = request.form.get('skill1') != None
    skill2 = request.form.get('skill2') != None
    skill3 = request.form.get('skill3') != None
    skill4 = request.form.get('skill4') != None
    skill5 = request.form.get('skill5') != None
    skill6 = request.form.get('skill6') != None
    skill7 = request.form.get('skill7') != None
    skill8 = request.form.get('skill8') != None
    skill9 = request.form.get('skill9') != None
    skill10 = request.form.get('skill10') != None
    skill11 = request.form.get('skill11') != None
    skill12 = request.form.get('skill12') != None
    skill13 = request.form.get('skill13') != None
    skill14 = request.form.get('skill14') != None
    skill15 = request.form.get('skill15') != None
    skill16 = request.form.get('skill16') != None
    skill17 = request.form.get('skill17') != None
    # Boolean profile vector in fixed skill order, fed to the recommender.
    user_vector = [skill1, skill2, skill3, skill4, skill5, skill6, skill7, skill8, skill9, skill10, skill11, skill12, skill13, skill14, skill15, skill16, skill17]
    r = Recommender(user_vector)
    recs = r.recommend()
    descrip = r.rec_descrip
    str_recs = ' '.join(recs)
    # NOTE(review): the f-string below is truncated in this view of the file;
    # its body continues beyond this chunk.
    return render_template('base.html') + f'''
def recommend(): """ Route used for recommending. Receives a user_id param for specifying the user and returns a json with all the results. """ recommender = Recommender() user_id = int(request.args.get('user_id')) - 1 return recommender.recommend([user_id])
def get_recommendations(builds, clean=cleaned, svd=None, encoder=False):
    """Fit a Recommender on the cleaned parts table and score the given builds."""
    clean = clean.drop_duplicates()
    print(clean)
    # Default preprocessing is plain standardization; an SVD argument swaps
    # in dimensionality reduction capped at 40 components.
    if svd is not None and svd:
        preprocessors = [TruncatedSVD(min(40, int(svd)))]
    else:
        preprocessors = [StandardScaler()]
    clean[reg_cols] = scaler_reg.transform(clean[reg_cols])
    engine = Recommender(
        drop_columns=['Date Published', 'price_build', 'number_ratings',
                      'avg_rating', 'storage_price'],
        preprocessors=preprocessors,
    )
    engine.fit(clean)
    return engine.recommend(clean.loc[builds])
def helper(passenger):
    """Build a chronological itinerary from arrival gate, through recommended
    checkpoints, to the departure gate, with randomized times between stops."""
    arrival = passenger.get_arrival_gate()
    dest = passenger.get_dest_gate()
    checkpoints = Recommender(demo=True).recommendations()

    clock = arrival['time']
    # Build newest-first, then reverse at the end.
    timeline = [{
        'time': clock.strftime("%H:%M"),
        'title': 'Departure Gate {}'.format(dest['gate']),
        'subtitle': '{} to {}'.format(dest['flight'], dest['to']),
    }]
    max_char = 65  # KYLE: change to vary maximum characters allowed
    for chk in checkpoints:
        clock = clock - timedelta(minutes=randint(15, 60))
        timeline.append({
            'time': clock.strftime("%H:%M"),
            'title': chk['name'],
            'subtitle': chk['description'][:max_char] + '...',
        })
    timeline.append({
        'time': (clock - timedelta(minutes=randint(30, 80))).strftime("%H:%M"),
        'title': 'Arrival Gate {}'.format(arrival['gate']),
        'subtitle': '{} from {}'.format(arrival['flight'], arrival['from']),
    })
    return list(reversed(timeline))
def eval_model(parameters):
    """Score one hyperopt parameter sample and return its loss dict."""
    print("Parameters:")
    pprint(parameters)
    print()
    estimator = Recommender(
        useALS=True,
        useBias=True,
        rank=int(parameters['rank']),
        regParam=parameters['regParam'],
        lambda_1=parameters['lambda_1'],
        lambda_2=parameters['lambda_2'],
        lambda_3=0.0,
        userCol='user',
        itemCol='item',
        ratingCol='rating',
        nonnegative=False)
    train_score, test_score = score_model(estimator)
    # hyperopt minimizes the held-out (test) score.
    return {'loss': test_score, 'status': hyperopt.STATUS_OK}
def main():
    """CLI entry point for the NYC event recommender.

    Flags: -t/--today (today's events), -a/--all (whole week),
    -j/--json (json output). With no selection flag, prints help.
    """
    # BUG FIX: a second bare ArgumentParser() used to be created right after
    # this one, silently discarding the description.
    parser = argparse.ArgumentParser(description='Nyc Event Recommender')
    parser.add_argument('-t', '--today', action='store_true',
                        help='Show today\'s events')
    parser.add_argument('-a', '--all', action='store_true',
                        help='Show events all week')
    parser.add_argument('-j', '--json', action='store_true',
                        help='Show events in json format')
    args = parser.parse_args()
    if args.today or args.all:
        # os.system('cls' if os.name == 'nt' else 'clear')
        # Renamed local from `json` to avoid shadowing the stdlib module.
        as_json = args.json
        recommender = Recommender(args.today, as_json)
        events = recommender.get_recommendation()
        print(events)
    else:
        parser.print_help()
def main(args):
    """Evaluate the course recommender over personal timetables.

    For each timetable (optionally restricted to one major), removes
    args.num_remove courses and scores how well they are recovered;
    accumulates mean scores per timetable length and pickles the result.
    """
    simfcn = 'cosine'
    # Use context managers so file handles are closed deterministically.
    with open("jhu.recommended.model", "rb") as model_file:
        similarities = pickle.load(model_file)
    recommender = Recommender(args.school, simfcn)
    semester = Semester.objects.filter(name=args.semester, year=args.year)
    if args.action == "all":
        ptts = PersonalTimetable.objects.filter(school=args.school,
                                                semester=semester)
    else:
        print(args.action)
        major_students = Student.objects.filter(major=args.action)
        ptts = PersonalTimetable.objects.filter(school=args.school,
                                                semester=semester,
                                                student__in=major_students)
    scores = {}
    num_timetables = {}
    for ptt in ptts:
        # BUG FIX: map() returns a lazy iterator in Python 3 and has no
        # len(); materialize the course ids as a list.
        course_ids = [c.id for c in ptt.courses.all()]
        length = len(course_ids)
        # Need strictly more courses than we remove.
        if length < args.num_remove + 1:
            continue
        s = score(recommender, course_ids, similarities, args.num_remove)
        if length not in scores:
            scores[length] = np.zeros(args.num_remove)
            num_timetables[length] = 0
        scores[length] += s
        num_timetables[length] += 1
    print(num_timetables)
    # Convert accumulated sums to per-length means.
    for length in scores:
        scores[length] /= float(num_timetables[length])
    print(scores)
    with open('recommender.' + args.school + '.scores', "wb") as out_file:
        pickle.dump(scores, out_file)
def create_app(spark_context, cfg_file_path):
    """Create the Flask app and the module-level Recommender it serves."""
    global recommender
    recommender = Recommender(spark_context, cfg_file_path)
    flask_app = Flask(__name__)
    flask_app.register_blueprint(main)
    flask_app.config.update(
        SECRET_KEY='7d441f27d441f27567d441f2b6176a',
        TEMPLATES_AUTO_RELOAD=True,
    )
    return flask_app
def get(self, user_id):
    """Render the recommendations page for one user."""
    user_name = User(user_id=user_id).retrieve_user_name()
    all_ratings = Ratings().retrieve_all_user_ratings()
    recommendations = Recommender(all_ratings).recommend(int(user_id))
    self.render('view_recommendations.html',
                title="View Recommendations",
                user_id=user_id,
                user_name=user_name,
                recommendations=recommendations)
def load_models_and_businesses(spark_context):
    """Populate the module-level recommender and matrix-factorization model."""
    global recommender
    global richer_biz_info
    global model
    # NOTE(review): richer_biz_info is declared global but never assigned in
    # this function — presumably loaded elsewhere; verify.
    path = __get_model_path()
    recommender = Recommender(spark_context, path)
    model = recommender.load_mf_model()
def analyzeBestFitUser():
    """
    Dig deeper into the user with the best predictions on the test set.
    (Translated from the original Chinese docstring.)

    Prints the genre distribution of the movies recommended to that user,
    the genre distribution of the user's own top-rated movies, and a
    side-by-side CSV-style comparison of the two.
    """
    movies, movieTagMat, userRankMat, testCases = loadData()
    user2userPredictor = user2user(userRankMat, topK=105)
    item2itemPredictor = item2item(userRankMat, movieTagMat, topK=20)
    # do test — the commented block below is the SSE scan that originally
    # found the best-fit user; its result is now hard-coded as uid = 480.
    # _, results = predictTest(user2userPredictor, testCases, "")
    # _, results = predictTest(item2itemPredictor, testCases, "")
    # userAvgSSE = defaultdict(float)
    # for res in results:
    #     userAvgSSE[res[0]] += (res[2] - res[1]) ** 2
    # sse = list(userAvgSSE.items())
    # sse.sort(key=lambda x: x[1])
    # # best-fit user
    # uid, minSSE = sse[0]
    # print("(uid, smallest SSE): ({}, {})".format(uid, minSSE))
    uid = 480
    # do recommend — compare results from different recommenders
    # (translated from Chinese); the user2user variant is kept for reference.
    # recommender = Recommender(movieTagMat, userRankMat, movies, user2userPredictor)
    recommender = Recommender(movieTagMat, userRankMat, movies, item2itemPredictor)
    recommendMovies = recommender.doRecommend(uid, 50)["recommended_movies"]
    print("recommended movies:")
    # Count how many recommended movies fall into each genre.
    # NOTE(review): each dict value r appears to be a tuple whose first
    # element indexes into `movies` — confirm against Recommender.doRecommend.
    recommendedCategory = defaultdict(int)
    for m, r in recommendMovies.items():
        for genre in movies[r[0]].genres:
            recommendedCategory[genre] += 1
    for k, v in sorted(recommendedCategory.items(), key=lambda d: d[0], reverse=True):
        print("{}: {}".format(k, v))
    print("")
    # compare: genre counts of the user's own 50 highest-rated movies.
    print("His or her favorite movies:")
    userRank = userRankMat[uid]
    idx = np.argsort(-userRank)[:50]
    userLikeCategory = defaultdict(int)
    for i in idx:
        for genre in movies[i].genres:
            userLikeCategory[genre] += 1
    for k, v in sorted(userLikeCategory.items(), key=lambda d: d[0], reverse=True):
        print("{}: {}".format(k, v))
    print("")
    # CSV-style comparison: genre, recommended count, liked count
    # (0 when a genre appears on only one side).
    for k, v in recommendedCategory.items():
        if k in userLikeCategory:
            print("{},{},{}".format(k, v, userLikeCategory[k]))
        else:
            print("{},{},0".format(k, v))
    for k, v in userLikeCategory.items():
        if k not in recommendedCategory:
            print("{},0,{}".format(k, v))
def __init__(self):
    """Greet the user and construct the search and recommendation engines."""
    welcome = (
        "Hello, welcome to use What2watch movie search and recommendation system.\n"
    )
    prompt = (
        "Next, please select you want to use the search engine or the recommender.\n"
    )
    print(welcome)
    print(prompt)
    self.searcher = Searcher()
    self.recommender = Recommender()
def __init__(self, config=None):
    """ Requires configuration from cortex.yaml """
    # BUG FIX: the default used to be a mutable `{}` shared across all calls;
    # use the None sentinel instead. `config` is currently unused in this
    # body (it is only read by the commented-out S3 path below).
    if config is None:
        config = {}
    # When using s3 bucket to download the model
    # s3 = boto3.client("s3")
    # s3.download_file(config["bucket"], config["key"], "w2v_limitingfactor_v3.51.model")
    self.model = Recommender('models/w2v_limitingfactor_v3.51.model')
def init_recommender_system(self, rating_column, descriptor, five_feature_columns, two_group_columns):
    """Create and cache the Recommender built from the current utility matrix."""
    # users/items from get_utility_matrix are not needed here.
    utility_matrix, in_stock_reviews, _users, _items = self.get_utility_matrix()
    self.recommender_system = Recommender(
        utility_matrix,
        in_stock_reviews,
        rating_column,
        descriptor,
        five_feature_columns,
        two_group_columns,
    )
    return self.recommender_system
def __init__(self):
    """Set up the survey backend: config, recommender, submissions dir,
    and the list of recommendation strategies offered."""
    logging.info("Setting up survey...")
    cfg = Config()
    self.cfg = cfg
    self.rec = Recommender(cfg)
    submissions_dir = "/var/www/AppRecommender/src/web/submissions/"
    self.submissions_dir = submissions_dir
    if not os.path.exists(submissions_dir):
        os.makedirs(submissions_dir)
    self.strategies = ["cbh", "cbh_eset", "knn", "knn_eset", "knn_plus", "knnco"]
def home():
    """Landing page: on a valid submit, run the recommender and show results."""
    global search
    form = UserInput()
    if form.validate_on_submit():
        flash('Data taken successfully', 'success')
        results, search = Recommender().recommend(request.form)
        return render_template('results.html', results=results)
    return render_template('home.html', form=form)
def recommend(event, context=None):
    """Lambda-style handler: recommend tracks for one workout segment.

    Expects event["body"] to be a json segment description; seeds the
    recommender from a local influencer file and returns an HTTP-shaped
    dict with the track results (or a 500 payload on error).
    """
    body = event["body"]
    if not body:
        error = "post body is null or empty"
        logger.error(error)
        return {"statusCode": 500, "body": json.dumps({"error": error})}
    segment = json.loads(body)
    logger.info(segment)
    recommender = Recommender()
    # @todo, pass influencer seeds as argument
    influencer = os.path.join(os.path.join(local_dir, 'seeds'), 'mike.json')
    with open(influencer) as json_data:
        influences = json.load(json_data)
    logger.info(influences)
    recommender.artists = [a['id'] for a in influences['artists']]
    recommender.tracks = [t['id'] for t in influences['tracks']]
    recommender.genres = influences['genres']
    segment = Segment(
        segment['start_time'], segment['end_time'], segment['segment_type'],
        Power(segment['power']['min_intensity'],
              segment['power']['max_intensity']),
        segment['cadence'])
    try:
        results = recommender.get_tracks_for_segment(segment)
        print('got results from recommender.get_tracks_for_segment')
        logger.info(json.dumps(results, indent=4, sort_keys=True))
        return {
            "statusCode": 200,
            "headers": _get_headers(),
            "body": json.dumps(results, indent=4, sort_keys=True)
        }
    except Exception as e:
        logger.error(e.__doc__)
        # BUG FIX: Exception has no .message attribute in Python 3, and
        # `error` was undefined in this branch (NameError); use str(e).
        logger.error(str(e))
        logger.error(traceback.format_exc())
        return {
            "statusCode": 500,
            "headers": _get_headers(),
            "body": json.dumps({"error": str(e)})
        }
def recommendations():
    """API endpoint: recommend movies for an agent's user.

    Validates access_key and user_id query params, maps the user's movies to
    TMDB ids (vkino agent), runs the Recommender, persists the resulting
    Recommendation/MovieRecommendation rows, and returns them as json.
    """
    data = request.args
    try:
        access_key = str(dict(data)['access_key'][0])
    except Exception:  # BUG FIX: was a bare `except:` (caught SystemExit etc.)
        return accessKeyRequired()
    agent = Agent.query.filter_by(access_key=access_key).first()
    if agent is None:
        return accessKeyRequired(not_in_db=True)
    try:
        agent_user_id = str(data['user_id'])
    except Exception:  # BUG FIX: was a bare `except:`
        return badRequest()
    user = User.query.filter_by(agent_user_id=agent_user_id,
                                agent_id=agent.id).first()
    if user is None:
        return badRequest(user=True)
    user_movies = get_user_movies(user.id)
    if not user_movies:
        return 'User has no movies'  # , Response(403) jsonify([])
    # NOTE(review): indentation was reconstructed from a whitespace-mangled
    # source; everything below depends on user_tmdb_movies, which is only
    # defined in the vkino branch, so it is kept inside that branch.
    if agent.agent_name == 'vkino':
        user_tmdb_movies = []
        for movie in user_movies:
            tmdbid = Vkino.query.filter_by(vkino_id=movie).first().tmdb_fk_id
            if tmdbid is None:
                continue
            else:
                user_tmdb_movies.append(tmdbid)
        if not user_tmdb_movies:
            return jsonify(recommendations=[])
        min_num_of_recs = 3
        rs = Recommender(user_tmdb_movies, agent.agent_name + '_movies',
                         agent.agent_name + '_id', user.id)
        rs.tmdb_input_info()
        rs.form_characteristics()
        result = rs.get_recommendations()
        if not result:
            return jsonify(recommendations=result)
        agent_premiere_idx = [movie['pk_id'] for movie in result[:min_num_of_recs]]
        # Persist the recommendation and its movie rows.
        if type(agent_premiere_idx) is list:
            r = Recommendation(user.id, agent.id, datetime.now())
            db.session.add(r)
            db.session.commit()
            for movie in agent_premiere_idx:
                mr = MovieRecommendation(r.id, movie, datetime.now())
                db.session.add(mr)
                db.session.commit()
        return jsonify(recommendations=agent_premiere_idx, recommendation_id=r.id)
def process(headline, description):
    """Classify the headline+description, then recommend from that category."""
    query = headline + ' ' + description
    # Highest-confidence category first.
    ranked = sorted(predictor.predictor(query), key=lambda p: p[1], reverse=True)
    category = categories[ranked[0][0]]
    results = Recommender(query, dataset[category]).return_results()
    return jsonify(results), 200
def run_strategy(cfg, sample_file):
    """Run the ROC experiment over one population sample file.

    For each user in the sample, repeatedly holds out 90% of the package
    profile, asks the recommender to recover it, accumulates ROC results,
    and writes the plots plus a summary comment file.
    """
    rec = Recommender(cfg)
    repo_size = rec.items_repository.get_doccount()
    results = ExperimentResults(repo_size)
    label = get_label(cfg)
    population_sample = []
    sample_str = sample_file.split('/')[-1]
    with open(sample_file, 'r') as f:
        for line in f.readlines():
            user_id = line.strip('\n')
            population_sample.append(
                os.path.join(cfg.popcon_dir, user_id[:2], user_id))
    sample_dir = ("results/roc-sample/%s" % sample_str)
    if not os.path.exists(sample_dir):
        os.makedirs(sample_dir)
    log_file = os.path.join(sample_dir, label["values"])
    # n iterations per population user
    for submission_file in population_sample:
        user = PopconSystem(submission_file)
        user.filter_pkg_profile(cfg.pkgs_filter)
        user.maximal_pkg_profile()
        for n in range(iterations):
            # Fill sample profile
            profile_len = len(user.pkg_profile)
            item_score = {}
            for pkg in user.pkg_profile:
                item_score[pkg] = user.item_score[pkg]
            sample = {}
            sample_size = int(profile_len * 0.9)
            for i in range(sample_size):
                # BUG FIX: dict.keys() is a non-indexable view in Python 3,
                # so random.choice raises TypeError; materialize as a list.
                key = random.choice(list(item_score.keys()))
                sample[key] = item_score.pop(key)
            iteration_user = User(item_score)
            recommendation = rec.get_recommendation(iteration_user, repo_size)
            if hasattr(recommendation, "ranking"):
                results.add_result(recommendation.ranking, sample)
    plot_roc(results, log_file)
    plot_roc(results, log_file, 1)
    with open(log_file + "-roc.jpg.comment", 'w') as f:
        f.write("# %s\n# %s\n\n" % (label["description"], label["values"]))
        f.write("# roc AUC\n%.4f\n\n" % results.get_auc())
        f.write(
            "# threshold\tmean_fpr\tdev_fpr\t\tmean_tpr\tdev_tpr\t\tcoverage\n"
        )  # noqa
        for size in results.thresholds:
            f.write(
                "%4d\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\n"
                % (size, numpy.mean(results.fpr[size]),
                   numpy.std(results.fpr[size]),
                   numpy.mean(results.recall[size]),
                   numpy.std(results.recall[size]),
                   numpy.mean(results.coverage(size))))
def main():
    """Interactive entry point: register or identify a user, then recommend."""
    graph = Graph(args["movie_path"], args["ratings_path"])
    graph.constructGraph()
    print("")
    print("Are you a new user [y/n]: ")
    answer = str(input())
    if answer in ('y', 'Y'):
        # New user: add to graph, let them search and rate some movies first.
        print("Enter your name: ")
        name = str(input())
        user = User(name)
        recommender = Recommender(graph, user.userId)
        recommender.addUserToGraph()
        trie = searchTrie(recommender.movieTitles)
        print("Search for keyword: ")
        keyword = str(input())
        user.searchMovie(trie, keyword, recommender.movieTitles, recommender.movieId)
        print("Enter movieId from above to watch and rate: ")
        for movie_id in map(int, input().split(" ")):
            user.watchAndrateMovie(movie_id, recommender)
        print("Here are some recommendations for you\n")
        recommender.recommend()
        recommender.saveMatrixToNumpyFile()
    else:
        # Returning user: look up by id and recommend directly.
        print("Enter your userId(1 to 297): ")
        user_id = int(input())
        print("Enter your name: ")
        name = str(input())
        user = User(name, user_id, new=False)
        recommender = Recommender(graph, user.userId)
        print("Here are some recommendations for you\n")
        recommender.recommend()
def run_strategy(cfg, user):
    """Benchmark the strategy across weighting schemes and sample proportions.

    For each (weight, bm25_k1) pair and each sample proportion, repeatedly
    holds out part of the user's package profile, asks the recommender to
    recover it, and writes per-configuration metric logs and summary plots.
    """
    for weight in weighting:
        cfg.weight = weight[0]
        cfg.bm25_k1 = weight[1]
        rec = Recommender(cfg)
        repo_size = rec.items_repository.get_doccount()
        for proportion in sample_proportions:
            results = ExperimentResults(repo_size)
            label = get_label(cfg, proportion)
            log_file = "results/strategies/" + label["values"]
            for n in range(iterations):
                # Fill sample profile
                profile_size = len(user.pkg_profile)
                item_score = {}
                for pkg in user.pkg_profile:
                    item_score[pkg] = user.item_score[pkg]
                sample = {}
                sample_size = int(profile_size * proportion)
                for i in range(sample_size):
                    # BUG FIX: dict.keys() is a non-indexable view in
                    # Python 3; materialize it before random.choice.
                    key = random.choice(list(item_score.keys()))
                    sample[key] = item_score.pop(key)
                iteration_user = User(item_score)
                recommendation = rec.get_recommendation(
                    iteration_user, repo_size)
                write_recall_log(label, n, sample, recommendation,
                                 profile_size, repo_size, log_file)
                if hasattr(recommendation, "ranking"):
                    results.add_result(recommendation.ranking, sample)
            with open(log_file, 'w') as f:
                precision_10 = sum(results.precision[10]) / len(
                    results.precision[10])
                f1_10 = sum(results.f1[10]) / len(results.f1[10])
                f05_10 = sum(results.f05[10]) / len(results.f05[10])
                f.write("# %s\n# %s\n\ncoverage %d\n\n" %
                        (label["description"], label["values"],
                         recommendation.size))
                f.write("# best results (recommendation size; metric)\n")
                f.write(
                    "precision (%d; %.2f)\nf1 (%d; %.2f)\nf05 (%d; %.2f)\n\n"
                    % (results.best_precision()[0], results.best_precision()[1],
                       results.best_f1()[0], results.best_f1()[1],
                       results.best_f05()[0], results.best_f05()[1]))
                f.write(
                    "# recommendation size 10\nprecision (10; %.2f)\nf1 (10; %.2f)\nf05 (10; %.2f)" %  # noqa
                    (precision_10, f1_10, f05_10))
            precision = results.get_precision_summary()
            recall = results.get_recall_summary()
            f1 = results.get_f1_summary()
            f05 = results.get_f05_summary()
            accuracy = results.get_accuracy_summary()
            plot_summary(precision, recall, f1, f05, accuracy, log_file)
def authentication(ckey, csecret, atoken, atokensecret, topic):
    """Authenticate with Twitter, mine tweets about `topic`, and render a
    summary template with a generated tweet and account recommendations."""
    auth = tweepy.OAuthHandler(ckey, csecret)
    auth.set_access_token(atoken, atokensecret)
    api = tweepy.API(auth)
    # Raw json of up to 1000 recent tweets about the topic.
    results = [
        status._json
        for status in tweepy.Cursor(api.search, q=topic, count=1000).items(1000)
    ]
    my_tweets = api.user_timeline()
    my_first_tweet = my_tweets[0].text
    following = api.followers()
    recommenderObj = Recommender()
    generatedTweet = recommenderObj.generate(my_tweets, 1, following, 2)
    # accounts_recommend[0]: (mention, count) pairs; [1]: (user, count) pairs.
    accounts_recommend = recommenderObj.accounts_recommender(results)
    user_mentions = [pair[0] for pair in accounts_recommend[0]]
    users_mentions_counts = [pair[1] for pair in accounts_recommend[0]]
    users = [pair[0] for pair in accounts_recommend[1]]
    users_counts = [pair[1] for pair in accounts_recommend[1]]
    return template(
        "My first Tweet was: {{my_first_tweet_here}}, my generated text is {{generatedTweetHere}}"
        "Your Topic is: {{tweets_topic}}."
        "Here are some accounts that you may interested in:"
        "Among all the tweets:"
        "@{{first_mention}} was mentioned {{first_metion_count}} times."
        "@{{second_mention}} was mentioned {{second_metion_count}} times."
        "@{{third_mention}} was mentioned {{third_metion_count}} times."
        "{{first_user_count}} of @{{first_user}}'s tweets are about {{tweets_topic}}."
        "{{second_user_count}} of @{{second_user}}'s tweets are about {{tweets_topic}}."
        "{{third_user_count}} of @{{third_user}}'s tweets are about {{tweets_topic}}.",
        my_first_tweet_here=my_first_tweet,
        generatedTweetHere=generatedTweet,
        tweets_topic=topic,
        first_mention=user_mentions[0],
        second_mention=user_mentions[1],
        third_mention=user_mentions[2],
        first_metion_count=users_mentions_counts[0],
        second_metion_count=users_mentions_counts[1],
        third_metion_count=users_mentions_counts[2],
        first_user_count=users_counts[0],
        second_user_count=users_counts[1],
        third_user_count=users_counts[2],
        first_user=users[0],
        second_user=users[1],
        third_user=users[2])
def train(self):
    """Read the raw interaction datasets, explode them into attribute tuple
    lists, fit the Recommender on them, and persist the trained output.

    Reads four datasets (viewed-together, bought-together, all products,
    price list) via self.read_data, transforms them per session (views) and
    per customer (purchases), then writes recommender.fit(...) output with
    self.write_data.
    """
    viewed_together_data = self.read_data(
        self.data_paths[self.config.VIEWED_TOGETHER])
    bought_together_data = self.read_data(
        self.data_paths[self.config.BOUGHT_TOGETHER])
    all_products_data = self.read_data(
        self.data_paths[self.config.ALL_PRODUCTS])
    price_list_data = self.read_data(
        self.data_paths[self.config.PRICE_LIST])
    """getting some columns in lower case"""
    transformed_all_products_data = uniform_data(all_products_data,
                                                 self.product_attributes)
    """explode the lists into tuples of combinations per session ID for views and per user in bought"""
    print(
        "For the view Dataframe breaking lists of brands, product categories, product_types "
        "into permutations of brands, product categories, product_types as a list of tuples"
    )
    # View interactions are grouped per session (SID_IDX).
    viewed_together_cols, group_by_col = [
        'SID_IDX', 'CONFIG_ID', 'PRODUCT_CATEGORY', 'PRODUCT_TYPE', 'BRAND'
    ], 'SID_IDX'
    (tuple_list_viewed_brand, tuple_list_viewed_product_category,
     tuple_list_viewed_product_type,
     tuple_list_viewed_config) = self.transform_data(
         viewed_together_data, self.product_attributes, viewed_together_cols,
         group_by_col)
    print(
        "For the bought Dataframe breaking lists of brands, product categories, product_types "
        "into permutations of brands, product categories, product_types as a list of tuples"
    )
    # Purchase interactions are grouped per customer (CUSTOMER_IDX).
    bought_together_cols, group_by_col = [
        'CUSTOMER_IDX', 'CONFIG_ID', 'PRODUCT_CATEGORY', 'PRODUCT_TYPE',
        'BRAND'
    ], 'CUSTOMER_IDX'
    (tuple_list_bought_brand, tuple_list_bought_product_category,
     tuple_list_bought_product_type,
     tuple_list_bought_config) = self.transform_data(
         bought_together_data, self.product_attributes, bought_together_cols,
         group_by_col)
    recommender = Recommender()
    # fit returns (trained_data, <second value ignored here>).
    trained_data, _ = recommender.fit(
        tuple_list_viewed_brand, tuple_list_bought_brand,
        tuple_list_viewed_product_category,
        tuple_list_bought_product_category, tuple_list_viewed_product_type,
        tuple_list_bought_product_type, tuple_list_viewed_config,
        tuple_list_bought_config, transformed_all_products_data,
        price_list_data)
    self.write_data(trained_data)