Example #1
def test_build_features():

    users, items = 10, 100

    dataset = Dataset(user_identity_features=False, item_identity_features=False)
    dataset.fit(
        range(users),
        range(items),
        ["user:{}".format(x) for x in range(users)],
        ["item:{}".format(x) for x in range(items)],
    )

    # Build from lists
    user_features = dataset.build_user_features(
        [
            (user_id, ["user:{}".format(x) for x in range(users)])
            for user_id in range(users)
        ]
    )
    assert user_features.getnnz() == users ** 2

    item_features = dataset.build_item_features(
        [
            (item_id, ["item:{}".format(x) for x in range(items)])
            for item_id in range(items)
        ]
    )
    assert item_features.getnnz() == items ** 2

    # Build from dicts
    user_features = dataset.build_user_features(
        [
            (user_id, {"user:{}".format(x): float(x) for x in range(users)})
            for user_id in range(users)
        ],
        normalize=False,
    )

    assert np.all(user_features.todense() == np.array([list(range(users))] * users))

    item_features = dataset.build_item_features(
        [
            (item_id, {"item:{}".format(x): float(x) for x in range(items)})
            for item_id in range(items)
        ],
        normalize=False,
    )

    assert np.all(item_features.todense() == np.array([list(range(items))] * items))

    # Test normalization
    item_features = dataset.build_item_features(
        [
            (item_id, {"item:{}".format(x): float(x) for x in range(items)})
            for item_id in range(items)
        ]
    )

    assert np.all(item_features.sum(1) == 1.0)
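
The feature matrices these tests build are what a LightFM model consumes at training time. A minimal end-to-end sketch (the ids and feature names here are illustrative, not part of the test suite):

from lightfm import LightFM
from lightfm.data import Dataset

dataset = Dataset()
dataset.fit(range(10), range(100),
            user_features=["age:young"], item_features=["genre:drama"])

(interactions, weights) = dataset.build_interactions([(0, 1), (2, 3)])
user_features = dataset.build_user_features([(0, ["age:young"])])
item_features = dataset.build_item_features([(1, ["genre:drama"])])

model = LightFM(loss="warp")
model.fit(interactions, user_features=user_features,
          item_features=item_features, epochs=5)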
Example #3
def interactions(df):
    movie_genre = [x.split("|") for x in df["genre"]]
    all_movie_genre = sorted(
        list(set(itertools.chain.from_iterable(movie_genre))))

    all_occupations = sorted(list(set(df["occupation"])))

    dataset = Dataset()
    dataset.fit(
        df["userID"],
        df["itemID"],
        item_features=all_movie_genre,
        user_features=all_occupations,
    )

    item_features = dataset.build_item_features(
        (x, y) for x, y in zip(df.itemID, movie_genre))

    user_features = dataset.build_user_features(
        (x, [y]) for x, y in zip(df.userID, df["occupation"]))

    (interactions, _) = dataset.build_interactions(df.iloc[:, 0:3].values)

    train_interactions, test_interactions = cross_validation.random_train_test_split(
        interactions,
        test_percentage=TEST_PERCENTAGE,
        random_state=np.random.RandomState(SEEDNO),
    )
    return train_interactions, test_interactions, item_features, user_features
Example #4
def prepareData(df, tags):
    df = df[df.actionCategory == "WebNei clicked"]
    actionByUsers = df.groupby(["userName", "actionName"]).size()
    uniqueUsers = df[df.userName.isin(
        actionByUsers.index.get_level_values(
            0).unique().values)].drop_duplicates('userName')
    uniqueUsers['user_features'] = uniqueUsers[[
        'title', 'team', 'organization', 'department'
    ]].values.tolist()
    dataset = Dataset()
    dataset.fit((list(actionByUsers.index.get_level_values(0))),
                (list(actionByUsers.index.get_level_values(1))))

    rowM, colM = prepareJson(tags)
    rowU, colU = prepareUserFeatures(uniqueUsers)

    dataset.fit_partial(items=rowM,
                        item_features=colM,
                        users=rowU,
                        user_features=colU)

    (interactions, weights) = dataset.build_interactions(
        zip(list(actionByUsers.index.get_level_values(0)),
            list(actionByUsers.index.get_level_values(1))))
    # zip(rowM, [colM]) would pair only the first id with the feature list,
    # so repeat the feature list so that every id receives its features
    item_features = dataset.build_item_features(zip(rowM, [colM] * len(rowM)))
    user_features = dataset.build_user_features(zip(rowU, [colU] * len(rowU)))
    return interactions, item_features, user_features
Example #5
def create_dataset(df, item_features, list_item_features):
    """
	function to create the dataset based on df which stores all the data including
	features (tags) of each products
	Args: df(pandas dataframe) - 
	"""
    ## create a mapping between the user and item ids from our input data
    #to indices that will be used internally by the model
    dataset = Dataset(item_identity_features=True)
    list_user_names = list(df.index)
    list_items = df.columns.values

    dataset.fit(
        (user_name for user_name in list_user_names),
        (item for item in list_items),
        item_features=(item_feature for item_feature in list_item_features))

    # Build the interaction matrix: it encodes the interactions between
    # users and items. We need each (user, item) pair that has a 1 in df.
    list_pairs = list(df.stack().index)
    (interactions, weights) = dataset.build_interactions(
        (pair for pair in list_pairs))

    item_feature_matrix = dataset.build_item_features(item_features)

    return dataset, interactions, weights, item_feature_matrix
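
A small worked example of the df.stack() trick used above: with NaN marking missing interactions, stack() drops the NaN cells, so its index is exactly the observed (user, item) pairs (illustrative data):

import numpy as np
import pandas as pd

df = pd.DataFrame([[1, np.nan], [np.nan, 1]],
                  index=["user_a", "user_b"],
                  columns=["item_x", "item_y"])

# MultiIndex of the non-NaN cells: [('user_a', 'item_x'), ('user_b', 'item_y')]
print(list(df.stack().index))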
Example #6
class DataFit:
    def __init__(self):
        self.dataset = None

    def fit(self):
        book_list = DataPrep.get_book_list()
        book_feature_list = DataPrep.get_feature_list()
        user_list = DataPrep.get_user_list()
        self.dataset = Dataset()
        self.dataset.fit(users=user_list,
                         items=book_list,
                         item_features=book_feature_list)

        rating_list = DataPrep.get_rating_list()
        interactions, weights = self.dataset.build_interactions(rating_list)

        book_features = DataPrep.create_features()
        books_features = self.dataset.build_item_features(book_features)
        return interactions, weights, books_features

    def create_new_interactions(self, checkpoint):
        rating_list = DataPrep.get_rating_list_from_checkpoint(checkpoint)
        interactions, weights = self.dataset.build_interactions(rating_list)
        return interactions, weights

    def get_user_mapping(self):
        user_id_map, user_feature_map, item_id_map, item_feature_map = self.dataset.mapping()
        return user_id_map

    def get_book_mapping(self):
        user_id_map, user_feature_map, item_id_map, item_feature_map = self.dataset.mapping()
        return item_id_map

    @staticmethod
    def fit_evaluate(test_percentage=0.1):
        book_list = DataPrep.get_book_list()
        book_feature_list = DataPrep.get_feature_list()
        user_list = DataPrep.get_user_list()
        dataset = Dataset()
        dataset.fit(users=user_list,
                    items=book_list,
                    item_features=book_feature_list)

        rating_list = DataPrep.get_rating_list()
        random.shuffle(rating_list)
        rating_list_test = rating_list[:int(test_percentage *
                                            len(rating_list))]
        rating_list_train = rating_list[int(test_percentage *
                                            len(rating_list)):]
        interactions_train, weights_train = dataset.build_interactions(
            rating_list_train)
        interactions_test, weights_test = dataset.build_interactions(
            rating_list_test)

        return interactions_train, weights_train, interactions_test, weights_test
Example #7
    def obtener_matrices(self):
        """
        Método obtener_matrices. Obtiene las matrices necesarias para la creación de los modelos de LightFM.

        Este método solo se utiliza en la interfaz de texto.
        """

        global train, test, modelo, item_features, user_features

        # Get the dataframes
        Entrada.obtener_datos()
        ratings_df = Entrada.ratings_df
        users_df = Entrada.users_df
        items_df = Entrada.items_df

        # Transform the dataframes into matrices the models can use
        dataset = Dataset()
        dataset.fit(users_df[users_df.columns.values[0]],
                    items_df[items_df.columns.values[0]],
                    user_features=users_df[users_df.columns.values[1]],
                    item_features=items_df[items_df.columns.values[1]])

        # If the model is collaborative or hybrid, user ratings are taken into account
        if self.opcion_modelo == 1 or self.opcion_modelo == 2:
            (interacciones, pesos) = dataset.build_interactions(
                (row[ratings_df.columns.values[0]],
                 row[ratings_df.columns.values[1]],
                 row[ratings_df.columns.values[2]])
                for index, row in ratings_df.iterrows())
        else:
            (interacciones, pesos) = dataset.build_interactions(
                (row[ratings_df.columns.values[0]],
                 row[ratings_df.columns.values[1]])
                for index, row in ratings_df.iterrows())

        # Build the feature matrices and save them
        item_features = dataset.build_item_features(
            (row[items_df.columns.values[0]],
             [row[items_df.columns.values[1]]])
            for index, row in items_df.iterrows())
        user_features = dataset.build_user_features(
            (row[users_df.columns.values[0]],
             [row[users_df.columns.values[1]]])
            for index, row in users_df.iterrows())
        print("Guarda la matriz de item features")
        guardar_datos_pickle(item_features, 'la matriz de item features')
        print("Guarda la matriz de user features")
        guardar_datos_pickle(user_features, 'la matriz de user feautures')

        # Split the interactions into training and test sets and save them
        train, test = random_train_test_split(interacciones,
                                              test_percentage=0.2)
        print("Guarda la matriz de entrenamiento")
        guardar_datos_pickle(train, 'la matriz de entrenamiento')
        print("Guarda la matriz de test")
        guardar_datos_pickle(test, 'la matriz de test')
Example #8
def test_fitting():

    users, items = 10, 100

    dataset = Dataset()
    dataset.fit(range(users), range(items))

    assert dataset.interactions_shape() == (users, items)
    assert dataset.user_features_shape() == (users, users)
    assert dataset.item_features_shape() == (items, items)

    assert dataset.build_interactions([])[0].shape == (users, items)
    assert dataset.build_user_features([]).getnnz() == users
    assert dataset.build_item_features([]).getnnz() == items
Example #9
def test_fitting_no_identity():

    users, items = 10, 100

    dataset = Dataset(user_identity_features=False, item_identity_features=False)
    dataset.fit(range(users), range(items))

    assert dataset.interactions_shape() == (users, items)
    assert dataset.user_features_shape() == (users, 0)
    assert dataset.item_features_shape() == (items, 0)

    assert dataset.build_interactions([])[0].shape == (users, items)
    assert dataset.build_user_features([], normalize=False).getnnz() == 0
    assert dataset.build_item_features([], normalize=False).getnnz() == 0
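
The contrast between the two tests above is down to identity features: by default every user and item id gets its own indicator column, and any extra shared features simply widen the matrix. A quick sketch (the feature names are illustrative):

from lightfm.data import Dataset

dataset = Dataset()  # user/item identity features are enabled by default
dataset.fit(range(10), range(100), user_features=["age:young", "age:old"])

# 10 identity columns plus the 2 shared feature columns
assert dataset.user_features_shape() == (10, 12)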
Example #12
    def build_lightfm_dataset(self) -> None:
        """
        Builds final datasets for user-variant and variant-variant recommendations.
        """
        logging.info("Creating LightFM matrices...")
        lightfm_dataset = LFMDataset()
        ratings_list = self.interaction_list
        logging.info('#'*60)
        lightfm_dataset.fit_partial(
            (rating['user_id'] for rating in ratings_list),
            (rating['product_id'] for rating in ratings_list)
        )

        item_feature_names = self.item_df.columns
        logging.info(f'Logging item_feature_names - with product_id: \n{item_feature_names}')
        item_feature_names = item_feature_names[~item_feature_names.isin(['product_id'])]
        logging.info(f'Logging item_feature_names - without product_id: \n{item_feature_names}')

        for item_feature_name in item_feature_names:
            lightfm_dataset.fit_partial(
                items=(item['product_id'] for item in self.item_list),
                item_features=(item[item_feature_name] for item in self.item_list),
            )

        item_features_data = []
        for item in self.item_list:
            item_features_data.append(
                (
                    item['product_id'],
                    [
                        item['product_name'],
                        item['aisle'],
                        item['department']
                    ],
                )
            )
        logging.info(f'Logging item_features_data @build_lightfm_dataset: \n{item_features_data}')
        self.item_features = lightfm_dataset.build_item_features(item_features_data)
        self.interactions, self.weights = lightfm_dataset.build_interactions(
            ((rating['user_id'], rating['product_id']) for rating in ratings_list)
        )

        self.n_users, self.n_items = self.interactions.shape

        logging.info(f'Logging self.interactions @build_lightfm_dataset: \n{self.interactions}')
        logging.info(f'Logging self.weights @build_lightfm_dataset: \n{self.weights}')
        logging.info(
            f'The shape of self.interactions {self.interactions.shape} '
            f'and self.weights {self.weights.shape} represent the user-item matrix.')
Example #13
    def fit_data(self, matrix, user_features=None, item_features=None):
        """
        Create datasets for .fit() method.
        Args:
            matrix: User-item interactions matrix (weighted)
            user_features: User-features pandas DataFrame whose index contains user ids (crd_no)
            item_features: Item-features pandas DataFrame whose index contains good ids (plu_id)
        Returns:
            Model with fitted (mapped) datasets
        """
        matrix.sort_index(inplace=True)
        matrix.sort_index(inplace=True, axis=1)
        dataset = Dataset()
        dataset.fit((x for x in matrix.index), (x for x in matrix.columns))
        interactions = pd.melt(
            matrix.replace(0, np.nan).reset_index(),
            id_vars='index',
            value_vars=list(matrix.columns[1:]),
            var_name='plu_id',
            value_name='rating').dropna().sort_values('index')
        interactions.columns = ['crd_no', 'plu_id', 'rating']
        self.interactions, self.weights = dataset.build_interactions(
            [tuple(x) for x in interactions.values])

        if user_features is not None:
            user_features.sort_index(inplace=True)
            dataset.fit_partial(users=user_features.index,
                                user_features=user_features)
            self.user_features = dataset.build_user_features(
                ((index, dict(row))
                 for index, row in user_features.iterrows()))
        else:
            self.user_features = None
        if item_features is not None:
            item_features.sort_index(inplace=True)
            dataset.fit_partial(items=item_features.index,
                                item_features=item_features)
            self.item_features = dataset.build_item_features(
                ((index, dict(row))
                 for index, row in item_features.iterrows()))
        else:
            self.item_features = None
Example #14
def load_parameter():
    ratings = get_ratings()
    books = get_books()
    users = get_users()
    books_pd = convert_pd(books)

    id_users_books = StoreValue()

    for x in ratings:
        id_users_books._user_id.append(x[0])
        id_users_books._book_id.append(x[1])

    # Created following the guide at https://making.lyst.com/lightfm/docs/examples/dataset.html
    dataset_explicit = Dataset()
    dataset_explicit.fit(id_users_books._user_id,
                id_users_books._book_id)

    num_users, num_items = dataset_explicit.interactions_shape()
    print('Num users: {}, num_items {}.'.format(num_users, num_items))

    dataset_explicit.fit_partial(items=(x[0] for x in books),
                        item_features=(x[7] for x in books))
    
    dataset_explicit.fit_partial(users=(x[0] for x in users))


    # create ---> mapping
    # interactions: a COO matrix whose entries are (user_id, book_id) pairs
    # weights: the voting weights
    (interactions_explicit, weights_explicit) = dataset_explicit.build_interactions((id_users_books._user_id[i], id_users_books._book_id[i]) for i in range(len(ratings)))

    # Item features extracted from each book, based on the author of the book
    item_features = dataset_explicit.build_item_features(((x[0], [x[7]]) for x in books))
    # user_features = dataset_explicit.build_user_features(((x[0], [x[1]]) for x in users))

    model_explicit_ratings = LightFM_ext(loss='warp')

    (train, test) = random_train_test_split(interactions=interactions_explicit, test_percentage=0.02)

    model_explicit_ratings.fit(train, item_features=item_features, epochs=2, num_threads=4)
    return model_explicit_ratings, dataset_explicit, interactions_explicit, weights_explicit, item_features, books_pd
Example #15
def create_datasets(cluster_id):

    events_list = get_events_from_es(cluster_id)

    dataframe_interactions, dataframe_users_features, dataframe_item_features, user_tuple, item_tuple = create_interactions_and_features(events_list, cluster_id)

    print(dataframe_interactions, cluster_id, file=sys.stderr)
    print(dataframe_users_features, cluster_id, file=sys.stderr)
    print(dataframe_item_features, cluster_id, file=sys.stderr)

    # print(user_tuple)
    # print(item_tuple)

    user_features = format_users_features(dataframe_users_features)

    #print(user_features)

    item_features = format_items_features(dataframe_item_features)

    #print(item_features)

    dataset = Dataset()

    dataset.fit(
            dataframe_interactions['user'].unique(), # all the users
            dataframe_interactions['item'].unique(), # all the items
            user_features = user_features,
            item_features = item_features
    )

    (interactions, weights) = dataset.build_interactions([(x[0], x[1], x[2]) for x in dataframe_interactions.values ])

#    print(interactions)
#    print(weights)

    final_user_features = dataset.build_user_features(user_tuple, normalize=False)

    final_item_features = dataset.build_item_features(item_tuple, normalize=False)

    return dataset, interactions, weights, final_item_features, final_user_features
Example #16
def predict(user_id: int) -> str:
    model_file = Path(BASE_DIR).joinpath(MODEL_FILE_NAME)
    data_file = Path(BASE_DIR).joinpath(DATA_FILE_NAME)

    if not model_file.exists():
        return None

    if not data_file.exists():
        return None

    model: LightFM = pickle.load(open(model_file, "rb"))
    data: pd.DataFrame = pd.read_csv(data_file)

    dataset = Dataset()

    dataset.fit((cac for cac in data.cac.unique()),
                (product for product in data.product_code.unique()))

    features = ['product_code', 'country_code', 'cost_bin']

    for product_feature in features:
        dataset.fit_partial(
            users=(cac for cac in data.cac.unique()),
            items=(product for product in data.product_code.unique()),
            item_features=(feature
                           for feature in data[product_feature].unique()))

    item_features = dataset.build_item_features(
        (getattr(row, 'product_code'),
         [getattr(row, product_feature)
          for product_feature in features if product_feature != 'product_code'])
        for row in data[features].itertuples())

    predicted_products: List[str] = sample_recommendation(
        model=model,
        dataset=dataset,
        raw_data=data,
        item_features=item_features,
        user_ids=user_id)

    return predicted_products
Example #17
def train_model(
               df, user_id_col='user_id', item_id_col='business_id',
               item_name_col='name_business', evaluate=True):
    """ Train the model using collaborative filtering.
    Args:
        df: the input dataframe.
        user_id_col: user id column.
        item_id_col: item id column.
        item_name_col: item name column.
        evaluate: if evaluate the model performance.
    Returns:
        model_full: the trained model.
        df_interactions: dataframe with user-item interactions.
        user_dict: user dictionary containing user_id as key and
            interaction_index as value.
        item_dict: item dictionary containing item_id as key and
            item_name as value.
        user_feature_map: the feature map of users
        business_feature_map: the feature map of items
    """
    if evaluate:
        print('Evaluating model...')
        evaluate_model(df, user_id_col='user_id', item_id_col='business_id')
    print('Training model...')

    # build recommendations for known users and known businesses
    # with collaborative filtering method
    ds_full = Dataset()
    # we call fit to supply userid, item id and user/item features
    user_cols = ['user_id', 'average_stars']
    categories = [c for c in df.columns if c[0].isupper()]
    item_cols = ['business_id', 'state']

    for i in df.columns[10:]:
        item_cols.append(str(i))

    user_features = user_cols[1:]
    item_features = item_cols[2:]

    ds_full.fit(
        df[user_id_col].unique(),  # all the users
        df[item_id_col].unique(),  # all the items
        user_features=user_features,  # additional user features
        item_features=item_features
         )

    df_users = df.drop_duplicates(user_id_col)
    # df_users = df[df.duplicated(user_id_col) == False]
    users_features = []
    for i in range(len(df_users)):
        users_features.append(get_users_features_tuple(df_users.values[i]))
    users_features = ds_full.build_user_features(
        users_features, normalize=False)

    items = df.drop_duplicates(item_id_col)
    # items = df[df.duplicated(item_id_col) == False]
    items_features = []
    for i in range(len(items)):
        items_features.append(get_items_features_tuple(
            items.values[i], categories))
    items_features = ds_full.build_item_features(
        items_features, normalize=False)

    (interactions, weights) = ds_full.build_interactions(
        [(x[0], x[1], x[2]) for x in df.values])
    # model
    model_full = LightFM(
        no_components=100, learning_rate=0.05, loss='warp', max_sampled=50)
    model_full.fit(
        interactions, user_features=users_features,
        item_features=items_features, sample_weight=weights,
        epochs=10, num_threads=10)
    # mapping
    user_id_map, user_feature_map, business_id_map, business_feature_map = \
        ds_full.mapping()

    # data preparation
    df_interactions = pd.DataFrame(weights.todense())
    df_interactions.index = list(user_id_map.keys())
    df_interactions.columns = list(business_id_map.keys())
    user_dict = user_id_map
    item_dict = df.set_index(item_id_col)[item_name_col].to_dict()
    return model_full, df_interactions, user_dict, \
        item_dict, user_feature_map, business_feature_map
Example #18
def lambda_handler(event, context):
    try:
        ## Fetch data from RDS code
        connection = pymysql.connect(
            host='fitbookdb.crm91a2epcbi.us-east-1.rds.amazonaws.com',
            user='******',
            passwd='postgres',
            db='fitbookdb',
            cursorclass=pymysql.cursors.DictCursor)

        print("Connection successful")
    except:
        print("Connection error")

    # In[3]:

    #Get Food DataFrame
    dict_list = []

    with connection.cursor() as cur:
        cur.execute("select * from food_dataset")
        for row in cur:
            dict_list.append(row)

    food_rds_df = pd.DataFrame(dict_list)
    food_df = food_rds_df.copy()
    food_df.drop([
        'Portion_Default', 'Portion_Amount', 'Factor', 'Increment',
        'Multiplier', 'Portion_Display_Name', 'Food_Code', 'Display_Name'
    ],
                 axis=1,
                 inplace=True)
    # food_df.head()
    print('Food Dataframe imported')

    # In[4]:

    # # TODO: Perform Binning
    # food_30_bins = ['Alcohol', 'Calories', 'Saturated_Fats']
    # for each_column in food_30_bins:
    #     bins = np.linspace(food_df[each_column].min(), food_df[each_column].max(), 30)
    #     food_df[each_column+'bin'] = pd.cut(food_df[each_column], bins, labels=np.arange(0,len(bins)-1))
    # food_df

    # In[5]:

    # for each_column in food_30_bins:
    #     print(food_df[each_column].min())

    # In[6]:

    #Get User Dataframe
    # user_df = pd.read_csv('user_db_try.csv')
    # user_df.head()

    dict_list = []

    with connection.cursor() as cur:
        cur.execute("select * from tblUserData")
        for row in cur:
            dict_list.append(row)

    user_rds_df = pd.DataFrame(dict_list)
    user_df = user_rds_df.copy()
    user_df.drop([
        'cognitoAccessToken', 'cognitoIDToken', 'cognitoRefreshToken',
        'fitbitAccessToken', 'fitbitUserID', 'userName'
    ],
                 axis=1,
                 inplace=True)
    # user_df.head()

    print('User Dataframe imported')

    # In[7]:

    #Get userItem DataFrame
    # userItem_df = pd.read_csv('userItem_db_try_new.csv')
    # userItem_df.head()

    dict_list = []

    with connection.cursor() as cur:
        cur.execute("select * from tblUserRating")
        for row in cur:
            dict_list.append(row)

    userItem_rds_df = pd.DataFrame(dict_list)
    userItem_df = userItem_rds_df.copy()
    # userItem_df.head()
    print('UserItem Dataframe imported')

    # In[8]:

    #Make all the feature values unique
    for column_name in food_df.columns:
        if column_name != 'food_ID':
            food_df[column_name] = str(
                column_name) + ":" + food_df[column_name].astype(str)
    # food_df.head()

    # In[9]:

    # This dict will be useful while creating tuples
    food_features_df = food_df.drop(['food_ID'], axis=1).copy()
    food_features_dict = food_features_df.to_dict('split')
    # food_features_dict

    # In[10]:

    food_feature_values = []

    for column_name in food_features_df.columns:
        food_feature_values.extend(food_features_df[column_name].unique())

    # food_feature_values

    # In[11]:

    for column_name in user_df.columns:
        if column_name != 'userID':
            user_df[column_name] = str(
                column_name) + ":" + user_df[column_name].astype(str)

    user_features_df = user_df.drop(['userID'], axis=1).copy()

    user_features_dict = user_features_df.to_dict('split')
    # user_features_dict

    # In[12]:

    user_feature_values = []

    for column_name in user_features_df.columns:
        user_feature_values.extend(user_features_df[column_name].unique())

    # user_feature_values

    # In[13]:

    user_tuples = []
    food_tuples = []

    for index, row in user_df.iterrows():
        user_tuples.append((row['userID'], user_features_dict['data'][index]))

    for index, row in food_df.iterrows():
        food_tuples.append((row['food_ID'], food_features_dict['data'][index]))

    # food_tuples

    # In[14]:

    print("Creating LightFm dataset")
    dataset = Dataset()
    dataset.fit(users=(user_id for user_id in user_df['userID']),
                items=(food_id for food_id in food_df['food_ID']))

    print("Dataset Created")
    # In[15]:

    num_users, num_items = dataset.interactions_shape()
    print('Num users: {}, num_items {}.'.format(num_users, num_items))

    # In[16]:

    # dataset.fit_partial(items=(food_id for food_id in food_df['Food_Code']),
    #                            item_features=((each_feature for each_feature in food_features)for food_features in food_features_dict['data']))

    # In[17]:

    # dataset.fit_partial(items=(food_id for food_id in food_df['Food_Code']),
    #                            item_features=((row['Milk'], row['Meats'], row['Alcohol'], row['Calories'])for index,row in food_df.iterrows()))

    # In[18]:

    print("fittng item partial features")
    dataset.fit_partial(items=(food_id for food_id in food_df['food_ID']),
                        item_features=(each_value
                                       for each_value in food_feature_values))

    # In[19]:

    # dataset.fit_partial(users=(user_id for user_id in user_df['Id']),
    #                     user_features=((each_feature for each_feature in user_features)for user_features in user_features_dict['data']))

    # In[20]:
    print("fittng user partial features")

    dataset.fit_partial(users=(user_id for user_id in user_df['userID']),
                        user_features=(each_value
                                       for each_value in user_feature_values))

    # In[21]:

    # dataset.item_features_shape()
    # dataset.user_features_shape()

    # In[22]:

    print("Building Interactions")
    (interactions, weights) = dataset.build_interactions(
        ((x['userID'], x['food_ID'], x['rating'])
         for y, x in userItem_df.iterrows()))

    # print(repr(interactions))
    # print(weights)

    # In[23]:

    # interactions.shape

    # In[24]:

    print("Building item features")
    item_features = dataset.build_item_features(each_tuple
                                                for each_tuple in food_tuples)
    # print(item_features)

    # In[25]:

    user_features = dataset.build_user_features(each_tuple
                                                for each_tuple in user_tuples)
    # print(user_features)

    # In[26]:

    print("Fitting Model")
    model = LightFM(loss='warp')
    model.fit(interactions,
              item_features=item_features,
              user_features=user_features)

    print("Model trained!!")

    print("Pickle started!!")
    pickle.dump(model, open("/tmp/model.pkl", 'wb'), protocol=2)

    bucketName = "fitbook-lambda-packages"
    Key = "/tmp/model.pkl"
    outPutname = "model.pkl"

    print("Uploading to S3")
    s3 = boto3.client('s3')
    s3.upload_file(Key, bucketName, outPutname)
    print("Upload done")
    os.remove("/tmp/model.pkl")

    print("Pickle file deleted")
    print("Successssss!!!!!")
Example #19
def evaluate_model(
                  df, user_id_col='user_id',
                  item_id_col='business_id', stratify=None):
    """ Model evaluation.
    Args:
        df: the input dataframe.
        user_id_col: user id column.
        item_id_col: item id column.
        stratify: if use stratification.
    No return value
    """
    # create test and train datasets
    print('model evaluation')
    train, test = train_test_split(df, test_size=0.2, stratify=stratify)
    ds = Dataset()
    # we call fit to supply userid, item id and user/item features
    user_cols = ['user_id', 'average_stars']
    categories = [c for c in df.columns if c[0].isupper()]
    item_cols = ['business_id', 'state']

    for i in df.columns[10:]:
        item_cols.append(str(i))

    user_features = user_cols[1:]
    item_features = item_cols[2:]

    ds.fit(
        df[user_id_col].unique(),  # all the users
        df[item_id_col].unique(),  # all the items
        user_features=user_features,  # additional user features
        item_features=item_features
         )

    train_users = train.drop_duplicates('user_id')
    # train_users = train[train.duplicated('user_id') == False]
    train_user_features = []
    for i in range(len(train_users)):
        train_user_features.append(get_users_features_tuple(
            train_users.values[i]))
    train_user_features = ds.build_user_features(
        train_user_features, normalize=False)

    test_users = test.drop_duplicates('user_id')
    # test_users = test[test.duplicated('user_id') == False]
    test_user1_features = []
    for i in range(len(test_users)):
        test_user1_features.append(get_users_features_tuple(
            test_users.values[i]))
    test_user_features = ds.build_user_features(
        test_user1_features, normalize=False)

    train_items = train.drop_duplicates('business_id')
    # train_items = train[train.duplicated('business_id') == False]
    train_item1_features = []
    for i in range(len(train_items)):
        train_item1_features.append(get_items_features_tuple(
            train_items.values[i], categories))
    train_item_features = ds.build_item_features(
        train_item1_features, normalize=False)

    test_items = test.drop_duplicates('business_id')
    # test_items = test[test.duplicated('business_id') == False]
    test_item_features = []
    for i in range(len(test_items)):
        test_item_features.append(get_items_features_tuple(
            test_items.values[i], categories))
    test_item_features = ds.build_item_features(
        test_item_features, normalize=False)

    # plugging in the interactions and their weights
    (train_interactions, train_weights) = ds.build_interactions(
        [(x[0], x[1], x[2]) for x in train.values])
    (test_interactions, test_weights) = ds.build_interactions(
        [(x[0], x[1], x[2]) for x in test.values])

    # model
    model = LightFM(
        no_components=100, learning_rate=0.05, loss='warp', max_sampled=50)
    model.fit(
        train_interactions, user_features=train_user_features,
        item_features=train_item_features, sample_weight=train_weights,
        epochs=10, num_threads=10)

    # auc-roc
    train_auc = auc_score(
        model, train_interactions, user_features=train_user_features,
        item_features=train_item_features, num_threads=20).mean()
    print('Training set AUC: %s' % train_auc)
    test_auc = auc_score(
        model, test_interactions, user_features=test_user_features,
        item_features=test_item_features, num_threads=20).mean()
    print('Testing set AUC: %s' % test_auc)
Example #20
def calc(request):
    try :
        stores = Store.objects.all()
        reviews = Review.objects.all()

        stores = pd.DataFrame(list(stores.values('id', 'store_id','store_name', 'category',
        'address','latitude','longitude','average_rating')))
        reviews = pd.DataFrame(list(reviews.values('id', 'storeid','userid', 'score','reg_time')))

        reviews_source = [(reviews['userid'][i], reviews['storeid'][i]) for i in range(reviews.shape[0])]
        item_feature_source = [(stores['store_id'][i], [ stores['category'][i],stores['address'][i],stores['latitude'][i],stores['longitude'][i], stores['average_rating'][i]] ) for i in range(stores.shape[0]) ]

        dataset = Dataset()
        dataset.fit(users=reviews['userid'].unique(),
            items=reviews['storeid'].unique(),
            item_features=stores[stores.columns[1:]].values.flatten())

        interactions, weights = dataset.build_interactions(reviews_source)
        item_features = dataset.build_item_features(item_feature_source)

        # Split Train, Test data
        train, test = random_train_test_split(interactions, test_percentage=0.1)
        train, test = train.tocsr().tocoo(), test.tocsr().tocoo()
        train_weights = train.multiply(weights).tocoo()

        # Define Search Space
        trials = Trials()
        space = [hp.choice('no_components', range(10, 50, 10)), hp.uniform('learning_rate', 0.01, 0.05)]

        # Define Objective Function
        def objective(params):
            no_components, learning_rate = params
            global model
            model = LightFM(no_components=no_components,
                            learning_schedule='adagrad',
                            loss='warp',
                            learning_rate=learning_rate,
                            random_state=0)

            model.fit(interactions=train,
                    item_features=item_features,
                    sample_weight=train_weights,
                    epochs=3,
                    verbose=False)

            test_precision = precision_at_k(model, test, k=5, item_features=item_features).mean()
            print("no_comp: {}, lrn_rate: {:.5f}, precision: {:.5f}".format(
            no_components, learning_rate, test_precision))
            # test_auc = auc_score(model, test, item_features=item_features).mean()
            output = -test_precision

            if np.abs(output+1) < 0.01 or output < -1.0:
                output = 0.0

            return output

        # max_evals sets how many evaluation rounds to run.
        best_params = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=10, trials=trials)

        # Save the item features
        with open('./saved_models/item_features.pickle', 'wb') as fle:
            pickle.dump(item_features, fle, protocol=pickle.HIGHEST_PROTOCOL)

        # Save the model
        with open('./saved_models/model.pickle', 'wb') as fle:
            pickle.dump(model, fle, protocol=pickle.HIGHEST_PROTOCOL)

        item_biases, item_embeddings = model.get_item_representations(features=item_features)
        # Save the item_embeddings
        with open('./saved_models/item_embeddings.pickle', 'wb') as fle:
            pickle.dump(item_embeddings, fle, protocol=pickle.HIGHEST_PROTOCOL)
        
        return Response({'result': True}) 
    
    except :
        return Response({'result': False}) 
Example #21
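This snippet starts mid-script. A minimal sketch of the assumed prelude, mirroring the LightFM dataset tutorial the snippet follows (get_ratings is the tutorial's helper yielding dicts with 'User-ID' and 'ISBN' keys; the hyperparameter values are assumptions):

from lightfm import LightFM
from lightfm.cross_validation import random_train_test_split
from lightfm.data import Dataset

ITEM_ALPHA = 1e-6
NUM_COMPONENTS = 30
NUM_EPOCHS = 3

dataset = Dataset()
# fit the user/item id mappings from the ratings iterable
dataset.fit((x['User-ID'] for x in get_ratings()),
            (x['ISBN'] for x in get_ratings()))
num_users, num_items = dataset.interactions_shape()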
print('Num users : {}, num_items {}.'.format(num_users, num_items))

# add some item feature mappings, and creates a unique feature for each author
# NOTE: more item ids are fitted than usual, to make sure our mappings are complete
# even if there are items in the features dataset that are not in the interaction set
dataset.fit_partial(items=(x['ISBN'] for x in get_book_features()),
                    item_features=(x['Book-Author']
                                   for x in get_book_features()))

# build the interaction matrix which is a main input to the LightFM model
# it encodes the interactions between the users and the items
(interactions, weights) = dataset.build_interactions(
    ((x['User-ID'], x['ISBN']) for x in get_ratings()))

# item_features matrix can also be created
item_features = dataset.build_item_features(
    ((x['ISBN'], [x['Book-Author']]) for x in get_book_features()))

# split the current dataset into a training and test dataset
train, test = random_train_test_split(interactions,
                                      test_percentage=0.01,
                                      random_state=None)

# build the model using the training dataset, notice the use of item_features as well,
# this is a hybrid model
model = LightFM(loss='warp',
                item_alpha=ITEM_ALPHA,
                no_components=NUM_COMPONENTS)

# train the hybrid model on the training dataset
model.fit(train, item_features=item_features, epochs=NUM_EPOCHS, num_threads=1)
Example #22
def run_lightfm(ratings, train, test, k_items, dataset):
    def create_interaction_matrix(df,
                                  user_col,
                                  item_col,
                                  rating_col,
                                  norm=False,
                                  threshold=None):
        '''
        Function to create an interaction matrix dataframe from transactional type interactions
        Required Input -
            - df = Pandas DataFrame containing user-item interactions
            - user_col = column name containing user's identifier
            - item_col = column name containing item's identifier
            - rating col = column name containing user feedback on interaction with a given item
            - norm (optional) = True if a normalization of ratings is needed
            - threshold (required if norm = True) = value above which the rating is favorable
        Expected output -
            - Pandas dataframe with user-item interactions ready to be fed in a recommendation algorithm
        '''
        interactions = df.groupby([user_col, item_col])[rating_col] \
                .sum().unstack().reset_index(). \
                fillna(0).set_index(user_col)
        if norm:
            interactions = interactions.applymap(lambda x: 1
                                                 if x > threshold else 0)
        return interactions

    test_interactions = create_interaction_matrix(df=test,
                                                  user_col='userId',
                                                  item_col='movieId',
                                                  rating_col='rating')

    budget_l = dataset.budget.unique().tolist()
    gross_l = dataset.gross.unique().tolist()
    awards_l = dataset.awards.unique().tolist()
    nom_l = dataset.nominations.unique().tolist()
    votes_l = dataset.votes.unique().tolist()
    item_ids = np.unique(train.movieId.astype(int))
    print(f'length dataset: {len(dataset)}')
    dataset = dataset[dataset.movieId.isin(item_ids)]
    print(f'length dataset: {len(dataset)}')
    item_features_list = [f'rating_{f}' for f in range(11)]
    gen = [
        'Action', 'Adventure', 'Animation', "Children's", 'Comedy', 'Crime',
        'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'IMAX',
        'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western'
    ]  # 'unknown' add unknown for movielens100k
    item_features_list += gen
    item_features_list += budget_l
    item_features_list += gross_l
    item_features_list += awards_l
    item_features_list += nom_l
    item_features_list += votes_l
    item_features = []
    for y, x in dataset.iterrows():
        genres = x['genres']
        tmp_row = (int(x['movieId']), [
            x['rating'], x['budget'], x['gross'], x['awards'],
            x['nominations'], x['votes']
        ])
        for g in genres:
            tmp_row[1].append(g)
        item_features.append(tmp_row)
    #item_features = [(int(x['movieId']), [x['rating'], z, x['budget'], x['gross'], x['awards'], x['votes']]) for y, x in dataset.iterrows() for z in x['genres']] #x['nominations']
    user_ids = np.unique(train.userId)
    built_dif = Dataset()
    built_dif.fit_partial(users=user_ids)
    built_dif.fit_partial(items=item_ids)
    built_dif.fit_partial(item_features=item_features_list)
    dataset_item_features = built_dif.build_item_features(item_features)
    (interactions, weights) = built_dif.build_interactions(
        ((int(x['userId']), int(x['movieId'])) for y, x in train.iterrows()))
    modelx = LightFM(no_components=30, loss='bpr', k=15, random_state=1)
    modelx.fit(interactions,
               epochs=30,
               num_threads=4,
               item_features=dataset_item_features
               )  #item_features=dataset_item_features
    test = sparse.csr_matrix(test_interactions.values)
    test = test.tocoo()
    num_users, num_items = built_dif.interactions_shape()
    print('Num users: {}, num_items {}.'.format(num_users, num_items))

    prec_list = dict()
    rec_list = dict()

    for num_k in k_items:
        trainprecision = precision_at_k(
            modelx, test, k=num_k, item_features=dataset_item_features).mean(
            )  #item_features=dataset_item_features,
        print('Hybrid training set precision: %s' % trainprecision)
        trainrecall = recall_at_k(modelx,
                                  test,
                                  k=num_k,
                                  item_features=dataset_item_features).mean(
                                  )  #item_features=dataset_item_features
        print('Hybrid training set recall: %s' % trainrecall)
        if num_k in prec_list:
            prec_list[num_k].append(trainprecision)
        else:
            prec_list[num_k] = trainprecision

        if num_k in rec_list:
            rec_list[num_k].append(trainrecall)
        else:
            rec_list[num_k] = trainrecall

    return prec_list, rec_list
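
For reference, a small worked example of the groupby/unstack pivot performed by create_interaction_matrix above, with one row per user and one column per item (illustrative data):

import pandas as pd

ratings = pd.DataFrame({"userId": [1, 1, 2],
                        "movieId": [10, 20, 10],
                        "rating": [4.0, 3.0, 5.0]})

pivot = (ratings.groupby(["userId", "movieId"])["rating"]
         .sum().unstack().reset_index()
         .fillna(0).set_index("userId"))
# movieId   10   20
# userId
# 1        4.0  3.0
# 2        5.0  0.0
print(pivot)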
Example #23
for rid, row in movies.iterrows():
    for m in match_lst:
        if m.lower() in row[1].lower():
            matches.append(row[0])

print(good_ratings.head())
rating_iter = zip(good_ratings['userId'], good_ratings['movieId'])
new_iter = ((new_user, x) for x in matches)
interactions, weights = dataset.build_interactions(chain(
    rating_iter, new_iter))

print(repr(interactions))
mov_features = ((row[0], row[2].split('|') + [row[3], row[0]])
                for rid, row in movies.iterrows())
# print(mov_features[0])
item_features = dataset.build_item_features(mov_features)

model = LightFM(loss='warp',
                no_components=28,
                item_alpha=0.0001,
                learning_rate=0.05)
model.fit(interactions, item_features=item_features, num_threads=16)

movie2name = {}
for rid, row in movies.iterrows():
    movie2name[row[0]] = row[1]

n_users, n_items = dataset.interactions_shape()
# Adjust using base ratings
base_mat = model.predict(0, np.arange(n_items), num_threads=16)
base_mat = (base_mat + np.min(base_mat))
Example #24
              item_features=range(2048))
    # Build Train Interactions
    (train_interactions, weights) = train.build_interactions(
        ((row[0], row[1]) for index, row in df_train.iterrows()))
    # Build Features
    ## Call build_user/item_features with iterables of (user/item id, [features]) or (user/item id, {feature: feature weight}) to build feature matrices.
    print('Loading Features...')
    list_features = {}
    for image_index, image_feature in enumerate(features):
        list_features[image_index] = []
        for feature_index, feature_weight in enumerate(image_feature):
            list_features[image_index].append({feature_index: feature_weight})

    features_generator = ((item_id, ele) for item_id in list_features.keys()
                          for ele in list_features[item_id])
    item_features = train.build_item_features(features_generator,
                                              normalize=False)
    print('End Loading Features.')

    ### LOAD

    print('Load Model...')
    with open(weight_directory + '_step{0}_LFM.pickle'.format(args.epoch),
              'rb') as dump:
        model = pickle.load(dump)
    print('End Model')

    # # Evaluation
    print("Evaluation...")
    with open(
            result_directory +
            '_top{0}_ep{1}_LFM.tsv'.format(args.topk, args.epoch), 'w') as out:
Example #25
def main():

	if request.method == 'POST':
		global df_movies
		# global top_trending_ids
		# print(list(df_movies[df_movies.movie_id_ml.isin(top_trending_ids)].title) )
		print(request.form)
		# Get recommendations!
		if 'run-mf-model' in request.form:
			
			for i, user_rating in enumerate(session['arr']):
				session['arr'][i] = user_rating[:-2]
			session['movieIds'] = session['movieIds'][:-2]
			rated_movies = min(len(session['arr'][0]), len(session['movieIds']))
			for i, user_rating in enumerate(session['arr']):
				session['arr'][i] = user_rating[:rated_movies]
			session['movieIds'] = session['movieIds'][:rated_movies]

			pu = recommendation_mf(session['arr'], session['members'], session['movieIds'])


			session.clear()
			top_trending_ids = list(df_movies.sort_values(by="trending_score").head(200).sample(15).movie_id_ml)
			session['counter'] = 0
			session['members'] = 0
			session['userAges'] = []
			session['userGenders'] = []
			session['movieIds'] = list(df_movies[df_movies.movie_id_ml.isin(top_trending_ids)].movie_id_ml)
			session['top15'] = list(df_movies[df_movies.movie_id_ml.isin(top_trending_ids)].title) 
			session['top15_posters'] = list(df_movies[df_movies.movie_id_ml.isin(top_trending_ids)].poster_url)
			session['arr'] = None
			return(render_template('main.html', settings = {'friendsInfo':False, 'showVote': False, 'people': 0, 'buttonDisable': False,'chooseRecommendation':False, 'recommendation': pu}))
		
		if 'run-siamese-model' in request.form:
			# global df
			global friends
			global ratings
			global new_friend_id
			new_ratings = []
			for mid, movie_real_id in enumerate(session['movieIds']):
				avg_mv_rating = np.median(np.array([user_ratings[mid] for user_ratings in session['arr']]))
				new_ratings.append({'movie_id_ml':movie_real_id, 
									'rating': avg_mv_rating,
									'friend_id': new_friend_id}) 
			new_friend = {'friend_id': new_friend_id, 'friends_age': np.mean(np.array(session['userAges'])), 'friends_gender': np.mean(np.array(session['userGenders']))}	

			friends.append(new_friend)
			ratings.extend(new_ratings)

			dataset = LightFMDataset()
			item_str_for_eval = "x['title'],x['release'], x['unknown'], x['action'], x['adventure'],x['animation'], x['childrens'], x['comedy'], x['crime'], x['documentary'], x['drama'],  x['fantasy'], x['noir'], x['horror'], x['musical'],x['mystery'], x['romance'], x['scifi'], x['thriller'], x['war'], x['western'], *soup_movie_features[x['soup_id']]"
			friend_str_for_eval = "x['friends_age'], x['friends_gender']"

			dataset.fit(users=(int(x['friend_id']) for x in friends),
						items=(int(x['movie_id_ml']) for x in movies),
						item_features=(eval("("+item_str_for_eval+")") for x in movies),
						user_features=((eval(friend_str_for_eval)) for x in friends))
			num_friends, num_items = dataset.interactions_shape()
			print(f'Num friends: {num_friends}, num_items {num_items}. {datetime.datetime.now()}')

			(interactions, weights) = dataset.build_interactions(((int(x['friend_id']), int(x['movie_id_ml']))
													  for x in ratings))
			item_features = dataset.build_item_features(((x['movie_id_ml'], 
											  [eval("("+item_str_for_eval+")")]) for x in movies) )
			user_features = dataset.build_user_features(((x['friend_id'], 
											  [eval(friend_str_for_eval)]) for x in friends) )

			print(f"Item and User features created {datetime.datetime.now()}")

			epochs = 50 #150
			lr = 0.015
			max_sampled = 11

			loss_type = "warp"  # "bpr"


			model = LightFM(learning_rate=lr, loss=loss_type, max_sampled=max_sampled)

			model.fit_partial(interactions, epochs=epochs, user_features=user_features, item_features=item_features)
			train_precision = precision_at_k(model, interactions, k=10, user_features=user_features, item_features=item_features).mean()

			train_auc = auc_score(model, interactions, user_features=user_features, item_features=item_features).mean()

			print(f'Precision: {train_precision}, AUC: {train_auc}, {datetime.datetime.now()}')

			k = 18
			top_movie_ids, scores = predict_top_k_movies(model, new_friend_id, k, num_items, user_features=user_features, item_features=item_features, use_features = False)
			top_movies = df_movies[df_movies.movie_id_ml.isin(top_movie_ids)]

			pu = recommendation_siamese(top_movies, scores)

			return(render_template('main.html', settings = {'friendsInfo':False, 'showVote': False, 'people': 0, 'buttonDisable': False,'chooseRecommendation':False, 'recommendation': pu}))
		
		# Collect friends info
		elif 'person-select-gender-0' in request.form:
			for i in range(session['members']):
				session['userAges'].append(int(request.form.get(f'age-{i}')))
				session['userGenders'].append(int(request.form.get(f'person-select-gender-{i}')))

			return(render_template('main.html', settings = {'friendsInfo':False, 'showVote': True, 'people': session['members'], 'buttonDisable': True,'chooseRecommendation':False, 'recommendation': None}))

		# Choose number of people in the group
		elif 'people-select' in request.form:
			count = int(request.form.get('people-select'))
			session['members'] = count
			session['arr'] = [[0 for x in range(15)] for y in range(count)] 
			return(render_template('main.html', settings = {'friendsInfo':True, 'showVote': False, 'people': count, 'buttonDisable': True,'chooseRecommendation':False, 'recommendation': None}))

		# All people voting
		elif 'person-select-0' in request.form:
			for i in range(session['members']):
				session['arr'][i][session['counter']] = int(request.form.get(f'person-select-{i}'))
			
			session['counter'] += 1 
			if session['counter'] < 15:     
				return(render_template('main.html', settings = {'friendsInfo':False, 'showVote': True, 'people': len(request.form), 'buttonDisable': True,'chooseRecommendation':False, 'recommendation': None}))
			else:
				return(render_template('main.html', settings = {'friendsInfo':False, 'showVote': False, 'people': len(request.form), 'buttonDisable': True,'chooseRecommendation':True,  'recommendation': None}))

	elif request.method == 'GET':
		session.clear()
		top_trending_ids = list(df_movies.sort_values(by="trending_score").head(200).sample(15).movie_id_ml)
		print(top_trending_ids)
		print(list(df_movies[df_movies.movie_id_ml.isin(top_trending_ids)].title) )
		session['counter'] = 0
		session['members'] = 0
		session['userAges'] = []
		session['userGenders'] = []
		session['movieIds'] = list(df_movies[df_movies.movie_id_ml.isin(top_trending_ids)].movie_id_ml) 
		session['top15'] = list(df_movies[df_movies.movie_id_ml.isin(top_trending_ids)].title) 
		session['top15_posters'] = list(df_movies[df_movies.movie_id_ml.isin(top_trending_ids)].poster_url)
		session['arr'] = None

		return(render_template('main.html', settings = {'showVote': False, 'people': 0, 'buttonDisable': False, 'recommendation': None}))
Example #26
items = pd.read_csv('items.txt', sep=';', error_bad_lines=False, header=None)
users = pd.read_csv('usersDescription.txt', sep=';', header=None)
ratings = pd.read_csv('ratings.txt', sep=';', header=None)

from lightfm.data import Dataset

dataset = Dataset(user_identity_features=True, item_identity_features=True)
dataset.fit(users=(users[50].unique()),
            items=(items[0]),
            item_features=list(range(2, 10)),
            user_features=list(range(2, 50)))

items_features_raw = list(
    (item[1], (np.argwhere(np.array(item[3:]) == 1)[0] + 2).tolist())
    for item in items.itertuples())
items_features = dataset.build_item_features(items_features_raw)
users_features_raw = build_user_dict(users)
users_features = dataset.build_user_features(users_features_raw)

num_users, num_items = dataset.interactions_shape()
print('Num users: {}, num_items {}.'.format(num_users, num_items))

ratings2 = ratings[ratings[2] > 0]
ratings2 = ratings2.drop_duplicates(subset=[1, 2, 3])
train, test = train_test_split(ratings2, test_size=0.1)
print(train.shape)
print(test.shape)

(train_interactions,
 train_weights) = dataset.build_interactions(train[[3, 1]].values)
(test_interactions, test_weights) = dataset.build_interactions(test[[3, 1]].values)
Example #27
    def obtener_matrices_gui(self, ruta_ratings, sep_ratings, encoding_ratings,
                             ruta_users, sep_users, encoding_users, ruta_items,
                             sep_items, encoding_items):
        """
        Método obtener_matrices_gui. Obtiene las matrices necesarias para la creación de los modelos de LightFM.

        Este método solo se utiliza en la interfaz web.

        Parameters
        ----------

        ruta_ratings: str
            ruta del archivo que contiene las valoraciones.
        sep_ratings: str
            separador utilizado en el archivo de valoraiones.
        encoding_ratings: str
            encoding utilizado en el archivo de valoraciones.
        ruta_users: str
            ruta del archivo que contiene los datos de los usuarios.
        sep_users: str
            separador utilizado en el archivo de usuarios.
        encoding_users: str
            encoding utilizado en el archivo de usuarios.
        ruta_items: str
            ruta del archivo que contiene los datos de los ítems.
        sep_items: str
            separador utilizado en el archivo de ítems.
        encoding_items: str
            encoding utilizado en el archivo de ítems.
        """

        global train, test, item_features, user_features

        # Load the input dataframes
        ratings_df = Entrada.leer_csv(ruta_ratings, sep_ratings,
                                      encoding_ratings)
        ratings_df.sort_values(
            [ratings_df.columns.values[0], ratings_df.columns.values[1]],
            inplace=True)
        users_df = Entrada.leer_csv(ruta_users, sep_users, encoding_users)
        users_df.sort_values([users_df.columns.values[0]], inplace=True)
        items_df = Entrada.leer_csv(ruta_items, sep_items, encoding_items)
        items_df.sort_values([items_df.columns.values[0]], inplace=True)

        # Transform the dataframes into matrices that the models can use
        dataset = Dataset()
        dataset.fit(users_df[users_df.columns.values[0]],
                    items_df[items_df.columns.values[0]],
                    user_features=users_df[users_df.columns.values[1]],
                    item_features=items_df[items_df.columns.values[1]])

        # If the model is collaborative or hybrid, the users' ratings are taken into account
        if self.opcion_modelo == 1 or self.opcion_modelo == 2:
            (interacciones, pesos) = dataset.build_interactions(
                (row[ratings_df.columns.values[0]],
                 row[ratings_df.columns.values[1]],
                 row[ratings_df.columns.values[2]])
                for index, row in ratings_df.iterrows())
        else:
            (interacciones, pesos) = dataset.build_interactions(
                (row[ratings_df.columns.values[0]],
                 row[ratings_df.columns.values[1]])
                for index, row in ratings_df.iterrows())

        # Build the feature matrices and save them
        item_features = dataset.build_item_features(
            (row[items_df.columns.values[0]],
             [row[items_df.columns.values[1]]])
            for index, row in items_df.iterrows())
        user_features = dataset.build_user_features(
            (row[users_df.columns.values[0]],
             [row[users_df.columns.values[1]]])
            for index, row in users_df.iterrows())
        print("Guarda la matriz de item features")
        guardar_datos_pickle(item_features, 'la matriz de item features')
        print("Guarda la matriz de user features")
        guardar_datos_pickle(user_features, 'la matriz de user feautures')

        # Split the interactions into training and test sets and save them
        train, test = random_train_test_split(interacciones,
                                              test_percentage=0.2)
        print("Guarda la matriz de entrenamiento")
        guardar_datos_pickle(train, 'la matriz de entrenamiento')
        print("Guarda la matriz de test")
        guardar_datos_pickle(test, 'la matriz de test')
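
    # A hedged usage sketch (not from the source; the instance name and the
    # file arguments below are illustrative assumptions):
    #
    #   recomendador.obtener_matrices_gui('ratings.csv', ';', 'utf-8',
    #                                     'users.csv', ';', 'utf-8',
    #                                     'items.csv', ';', 'utf-8')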
예제 #28
0
#This will create a feature for every unique author name in the dataset.

#(Note that we fit some more item ids: this is to make sure our mappings are complete even if there are items in the features dataset that are not in the interactions set.)
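
#A minimal sketch of that note, assuming the same `dataset` object and the
#`get_book_features()` iterator used below: fit_partial extends the existing
#id mappings without resetting them, so ISBNs that appear only in the book
#features file still receive an internal index.
dataset.fit_partial(
    items=(x['ISBN'] for x in get_book_features()),
    item_features=(x['Book-Author'] for x in get_book_features()),
)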

## Building the interactions matrix

#Having created the mapping, we build the interaction matrix:

(interactions, weights) = dataset.build_interactions(
    ((x['User-ID'], x['ISBN']) for x in get_ratings()))
print(repr(interactions))

#This is the main input into a LightFM model: it encodes the interactions between users and items.

#Since we have item features, we can also create the item features matrix:
item_features = dataset.build_item_features(
    ((x['ISBN'], [x['Book-Author']]) for x in get_book_features()))
print(repr(item_features))

## Building a model

#This is all we need to build a LightFM model:

model = LightFM(loss='bpr')

model.fit(interactions, item_features=item_features)
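
#Not in the original walkthrough: a hedged sketch of sanity-checking the fit
#with LightFM's built-in ranking metrics (a real evaluation would use a
#held-out interactions set).
from lightfm.evaluation import precision_at_k

train_precision = precision_at_k(
    model, interactions, item_features=item_features, k=10
).mean()
print('Train precision@10: {:.3f}'.format(train_precision))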

#Trying to load our own CSV files into a LightFM Dataset


def get_own_data():
예제 #29
0
        dataset.fit_partial(item_features=(x[str(fields[i])] for x in winefeatures))

    num_users, num_items = dataset.interactions_shape()
    
    #building the interaction matrix for training ratings
    (interactions, weights) = dataset.build_interactions(((x['taster'], x['title']) for x in trainrankings))

    #and the corresponding sparse matrices for CV and Test ratings
    (testinteractions, testweights) = dataset.build_interactions(((x['taster'], x['title']) for x in testrankings))
    (cvinteractions, cvweights) = dataset.build_interactions(((x['taster'], x['title']) for x in cvrankings))

    #here we need to remove title so our next iterator works properly
    fields.remove('title')
    #Double list comprehension to build the item features, supplying each of the
    #>200 features in winefeatures for every wine.
    item_features = dataset.build_item_features((x['title'],[x[field] for field in fields[1:]]) for x in winefeatures)

    #uncomment below to run randomized optimization
    #yieldlist = list(randomsearch(interactions, cvinteractions, item_features))
    #(score, hyperparams, model) = max(yieldlist, key=lambda x: x[0])
    #print("Best score {} at {}".format(score, hyperparams))
    #print(yieldlist)
    

    #Below are the results of our random optimization, hardcoded as parameters now.
    #Best score 0.9843319654464722 at
    bestparams = {'no_components': 59,
                  'learning_schedule': 'adagrad',
                  'loss': 'warp',
                  'learning_rate': 0.08565020895037347,
                  'item_alpha': 7.345729662383957e-10,
                  'user_alpha': 4.776609106732949e-09,
                  'max_sampled': 14,
                  'random_state': 69}
    model = LightFM(**bestparams)
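
    #Hedged continuation (not part of the original snippet): fit with the tuned
    #parameters and score on the held-out interactions; the epoch count is an
    #assumption.
    from lightfm.evaluation import auc_score

    model.fit(interactions, item_features=item_features, epochs=30)
    test_auc = auc_score(model, testinteractions,
                         train_interactions=interactions,
                         item_features=item_features).mean()
    print('Test AUC: {:.3f}'.format(test_auc))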
예제 #30
0
            user_features=user_feature_names,
            item_features=item_feature_names
            )

# check shape
num_users, num_items = dataset.interactions_shape()
print('Num users: {}, num_items: {}.'.format(num_users, num_items))
_, num_users_feature = dataset.user_features_shape()
_, num_items_feature = dataset.item_features_shape()
print('Num users feature: {}, num_items feature: {}.'.format(num_users_feature, num_items_feature))

# build user feature matrix
user_feature_matrix = dataset.build_user_features(user_feature_iterable, normalize=True)

# build item feature matrix
item_feature_matrix = dataset.build_item_features(item_feature_iterable, normalize=True)

# build interaction
(train_interactions, weights) = dataset.build_interactions(
    data=((row['userCode'], row['project_id'], row[interaction_col_name])
          for index, row in train.iterrows()
          if row['project_id'] not in ignore_project))

from lightfm import LightFM

model = LightFM(loss='warp', random_state=44, learning_schedule='adagrad')
model.fit(train_interactions,
        item_features=item_feature_matrix,
        user_features=user_feature_matrix,
        )

is_evaluate = 0
if is_evaluate:
    from lightfm.evaluation import precision_at_k