Example no. 1
0
    RatingPredictionOptimizer
from autorecsys.pipeline.preprocessor import MovielensPreprocessor
from autorecsys.recommender import RPRecommender

# logging setting
# NOTE: logging.basicConfig only configures the root logger on its FIRST call;
# a repeated call is a silent no-op, so configure exactly once at the level we want.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# load dataset
##Netflix Dataset
# dataset_paths = ["./examples/datasets/netflix-prize-data/combined_data_" + str(i) + ".txt" for i in range(1, 5)]
# data = NetflixPrizePreprocessor(dataset_paths)

# Step 1: Preprocess data into train/validation/test splits plus the
# categorical views and the hashed user/item vocabulary sizes.
movielens = MovielensPreprocessor()
train_X, train_y, val_X, val_y, test_X, test_y = movielens.preprocess()
train_X_categorical = movielens.get_x_categorical(train_X)
val_X_categorical = movielens.get_x_categorical(val_X)
test_X_categorical = movielens.get_x_categorical(test_X)
user_num, item_num = movielens.get_hash_size()

# Step 2: Build the recommender, which provides search space
# Step 2.1: Setup mappers to handle inputs
# The input carries (user_id, item_id) pairs; each LatentFactorMapper embeds
# one column. NOTE: `input` shadows the builtin, kept for compatibility with
# the rest of this example script.
input = Input(shape=[2])
user_emb_gmf = LatentFactorMapper(column_id=0,
                                  num_of_entities=user_num,
                                  embedding_dim=64)(input)
item_emb_gmf = LatentFactorMapper(column_id=1,
                                  num_of_entities=item_num,
                                  embedding_dim=64)(input)
Example no. 2
0
    # CLI options: dataset selection, separator, and search configuration.
    parser.add_argument('-data', type=str, help='dataset name')
    parser.add_argument('-data_path', type=str, help='dataset path')
    parser.add_argument('-sep', type=str, help='dataset sep')
    parser.add_argument('-search', type=str, help='input a search method name')
    parser.add_argument('-batch_size', type=int, help='batch size')
    parser.add_argument('-epochs', type=int, help='epochs')
    parser.add_argument('-early_stop', type=int, help='early stop')
    parser.add_argument('-trials', type=int, help='try number')
    args = parser.parse_args()

    # Default to the MovieLens "::" field separator when none is given.
    if args.sep is None:
        args.sep = '::'

    # Load dataset — the branches are mutually exclusive, so use if/elif and
    # fail fast on an unknown name instead of a NameError on `data` below.
    if args.data == "ml":
        data = MovielensPreprocessor(args.data_path, sep=args.sep)
    elif args.data == "netflix":
        dataset_paths = [
            args.data_path + "/combined_data_" + str(i) + ".txt"
            for i in range(1, 5)
        ]
        data = NetflixPrizePreprocessor(dataset_paths)
    else:
        raise ValueError('unsupported dataset: {}'.format(args.data))

    # Split into train/validation/test and expose the hashed entity counts.
    data.preprocessing(val_test_size=0.1, random_state=1314)
    train_X, train_y = data.train_X, data.train_y
    val_X, val_y = data.val_X, data.val_y
    test_X, test_y = data.test_X, data.test_y
    user_num, item_num = data.user_num, data.item_num
    logging.info('train_X size: {}'.format(train_X.shape))
    logging.info('train_y size: {}'.format(train_y.shape))
    logging.info('val_X size: {}'.format(val_X.shape))
    logging.info('val_y size: {}'.format(val_y.shape))
Example no. 3
0
    # CLI options shared with the other example entry points.
    parser.add_argument('-data_path', type=str, help='dataset path')
    parser.add_argument('-sep', type=str, help='dataset sep')
    parser.add_argument('-search', type=str, help='input a search method name')
    parser.add_argument('-batch_size', type=int, help='batch size')
    parser.add_argument('-epochs', type=int, help='epochs')
    parser.add_argument('-early_stop', type=int, help='early stop')
    parser.add_argument('-trials', type=int, help='try number')
    args = parser.parse_args()

    # Default to the MovieLens "::" field separator when none is given.
    if args.sep is None:
        args.sep = '::'

    # Step 1: Preprocess data
    # NOTE(review): user_num/item_num are only bound inside this branch —
    # any other value of args.data would fail at Step 2; confirm intended.
    if args.data == "ml":
        data = MovielensPreprocessor(csv_path=args.data_path,
                                     validate_percentage=0.1,
                                     test_percentage=0.1)
        train_X, train_y, val_X, val_y, test_X, test_y = data.preprocess()
        train_X_categorical = data.get_x_categorical(train_X)
        val_X_categorical = data.get_x_categorical(val_X)
        test_X_categorical = data.get_x_categorical(test_X)
        user_num, item_num = data.get_hash_size()

    # Step 2: Build the recommender, which provides search space
    # Dispatch on the requested model; the branches are mutually exclusive,
    # so use if/elif and reject unknown names instead of a later NameError.
    if args.model == 'mf':
        model = build_mf(user_num, item_num)
    elif args.model == 'mlp':
        model = build_mlp(user_num, item_num)
    elif args.model == 'gmf':
        model = build_gmf(user_num, item_num)
    else:
        raise ValueError('unsupported model: {}'.format(args.model))
Example no. 4
0
from autorecsys.pipeline import Input, LatentFactorMapper, RatingPredictionOptimizer, HyperInteraction
from autorecsys.pipeline.preprocessor import MovielensPreprocessor
from autorecsys.recommender import RPRecommender

# logging setting
# NOTE: logging.basicConfig only configures the root logger on its FIRST call;
# a repeated call is a silent no-op, so configure exactly once at the level we want.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# load dataset
##Netflix Dataset
# dataset_paths = ["./examples/datasets/netflix-prize-data/combined_data_" + str(i) + ".txt" for i in range(1, 5)]
# data = NetflixPrizePreprocessor(dataset_paths)

#Movielens 1M Dataset
data = MovielensPreprocessor("./examples/datasets/ml-1m/ratings.dat")

##Movielens 10M Dataset
# data = MovielensPreprocessor("./examples/datasets/ml-10M100K/ratings.dat")

##Movielens latest Dataset
# data = MovielensPreprocessor("./examples/datasets/ml-latest/ratings.csv", sep=',')

# Split into train/validation/test and expose the hashed entity counts.
data.preprocessing(val_test_size=0.1, random_state=1314)
train_X, train_y = data.train_X, data.train_y
val_X, val_y = data.val_X, data.val_y
test_X, test_y = data.test_X, data.test_y
user_num, item_num = data.user_num, data.item_num
logger.info('train_X size: {}'.format(train_X.shape))
logger.info('train_y size: {}'.format(train_y.shape))
logger.info('val_X size: {}'.format(val_X.shape))
0
 def test_MovielensPreprocessor(self):
     """Preprocess the 10k MovieLens sample and verify the frame shape."""
     ratings_path = os.path.join(dataset_directory, 'movielens/ratings-10k.dat')
     preprocessor = MovielensPreprocessor(csv_path=ratings_path)
     preprocessor.preprocess()
     # 10,000 ratings, each a (user, item, rating) triple.
     assert preprocessor.data_df.shape == (10000, 3)