def test_testset_none():
    """evaluate() must raise ValueError when the test set is None."""
    bm = BaseMethod(None, train_set=[], verbose=True)

    try:
        bm.evaluate(None, {}, False)
    except ValueError:
        pass
    else:
        # The original `assert True` passed even when no exception was
        # raised; fail explicitly if ValueError does not occur.
        raise AssertionError("evaluate() should raise ValueError for a None test set")
Example #2
0
 def test_testset_none(self):
     """evaluate() must raise ValueError when the test set is None."""
     bm = BaseMethod(None, verbose=True)
     bm.train_set = Dataset.from_uir(data=Reader().read("./tests/data.txt"))
     # assertRaises fails the test when ValueError is NOT raised, unlike
     # the original try/except-assert-True pattern which passed silently.
     with self.assertRaises(ValueError):
         bm.evaluate(None, {}, False)
Example #3
0
 def test_testset_none(self):
     """evaluate() must raise ValueError when the test set is None."""
     bm = BaseMethod(None, verbose=True)
     bm.train_set = TrainSet(None, None)
     # assertRaises fails the test when ValueError is NOT raised, unlike
     # the original try/except-assert-True pattern which passed silently.
     with self.assertRaises(ValueError):
         bm.evaluate(None, {}, False)
Example #4
0
def test_testset_none():
    """evaluate() must raise ValueError when the test set is None."""
    from cornac.data import TrainSet

    bm = BaseMethod(None, verbose=True)
    bm.train_set = TrainSet(None, None)

    try:
        bm.evaluate(None, {}, False)
    except ValueError:
        pass
    else:
        # The original `assert True` passed even when no exception was raised.
        raise AssertionError("evaluate() should raise ValueError for a None test set")
Example #5
0
    def test_organize_metrics(self):
        """_organize_metrics() splits metrics by type and rejects bad input."""
        bm = BaseMethod()

        rating_metrics, ranking_metrics = bm._organize_metrics([MAE(), AUC()])
        self.assertEqual(len(rating_metrics), 1)  # MAE is a rating metric
        self.assertEqual(len(ranking_metrics), 1)  # AUC is a ranking metric

        # Must raise; the old try/except-assert-True passed even when it didn't.
        with self.assertRaises(ValueError):
            bm._organize_metrics(None)
Example #6
0
def test_organize_metrics():
    """_organize_metrics() splits metrics by type and rejects bad input."""
    from cornac.metrics import MAE, AUC

    bm = BaseMethod()

    rating_metrics, ranking_metrics = bm._organize_metrics([MAE(), AUC()])
    assert len(rating_metrics) == 1  # MAE is a rating metric
    assert len(ranking_metrics) == 1  # AUC is a ranking metric

    try:
        bm._organize_metrics(None)
    except ValueError:
        pass
    else:
        # The original `assert True` passed even when no exception was raised.
        raise AssertionError("_organize_metrics(None) should raise ValueError")
Example #7
0
    def test_from_splits(self):
        """from_splits() validates its inputs and builds a proper BaseMethod."""
        data = Reader().read("./tests/data.txt")

        # Each invalid configuration must raise ValueError; assertRaises
        # fails the test when no exception occurs, unlike the original
        # try/except-assert-True pattern which passed silently.
        with self.assertRaises(ValueError):
            BaseMethod.from_splits(train_data=None, test_data=None)

        with self.assertRaises(ValueError):
            BaseMethod.from_splits(train_data=data, test_data=None)

        with self.assertRaises(ValueError):
            BaseMethod.from_splits(train_data=data,
                                   test_data=[],
                                   exclude_unknowns=True)

        bm = BaseMethod.from_splits(train_data=data[:-1], test_data=data[-1:])
        self.assertEqual(bm.total_users, 10)
        self.assertEqual(bm.total_items, 10)

        bm = BaseMethod.from_splits(
            train_data=data[:-1],
            test_data=data[-1:],
            val_data=[(data[0][0], data[1][1], 5.0)],
            verbose=True,
        )
        self.assertEqual(bm.total_users, 10)
        self.assertEqual(bm.total_items, 10)
Example #8
0
    def test_with_modalities(self):
        """Modality setters accept matching types and reject mismatches."""
        data = Reader().read("./tests/data.txt")
        sentiment_data = Reader().read("./tests/sentiment_data.txt",
                                       fmt="UITup",
                                       sep=",",
                                       tup_sep=":")
        bm = BaseMethod.from_splits(train_data=data[:-1], test_data=data[-1:])

        # All modalities start unset.
        self.assertIsNone(bm.user_text)
        self.assertIsNone(bm.item_text)
        self.assertIsNone(bm.user_image)
        self.assertIsNone(bm.item_image)
        self.assertIsNone(bm.user_graph)
        self.assertIsNone(bm.item_graph)
        self.assertIsNone(bm.sentiment)

        bm.user_text = TextModality()
        bm.item_image = ImageModality()
        bm.sentiment = SentimentModality(data=sentiment_data)
        bm._build_modalities()

        # Each type-mismatched assignment must raise ValueError; assertRaises
        # fails the test when no exception occurs (the original
        # try/except-assert-True pattern never failed).
        for attr, modality in [
            ("user_text", ImageModality()),
            ("item_text", ImageModality()),
            ("user_image", TextModality()),
            ("item_image", TextModality()),
            ("user_graph", TextModality()),
            ("item_graph", ImageModality()),
            ("sentiment", TextModality()),
            ("sentiment", ImageModality()),
        ]:
            with self.assertRaises(ValueError):
                setattr(bm, attr, modality)
def test_from_provided():
    """from_provided() validates its inputs and builds a proper BaseMethod."""
    data_file = './tests/data.txt'
    data = Reader.read_uir_triplets(data_file)

    # Each invalid configuration must raise; fail explicitly when it does
    # not (the original try/except-assert-True pattern passed silently).
    try:
        BaseMethod.from_provided(train_data=None, test_data=None)
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError for train_data=None")

    try:
        BaseMethod.from_provided(train_data=data, test_data=None)
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError for test_data=None")

    bm = BaseMethod.from_provided(train_data=data, test_data=data)

    assert bm.total_users == 10
    assert bm.total_items == 10
Example #10
0
def test_with_modules():
    """Module setters accept matching types and reject mismatches."""
    from cornac.data import TextModule, ImageModule, GraphModule

    bm = BaseMethod()

    # All modules start unset.
    assert bm.user_text is None
    assert bm.item_text is None
    assert bm.user_image is None
    assert bm.item_image is None
    assert bm.user_graph is None
    assert bm.item_graph is None

    bm.user_text = TextModule()
    bm._build_modules()

    # Each type-mismatched assignment must raise ValueError; fail explicitly
    # when it does not (the original try/except-assert-True never failed).
    # Commented-out GraphModule cases from the original were dead code and
    # have been removed.
    for attr, module in [
        ("user_text", ImageModule()),
        ("user_image", TextModule()),
        ("user_graph", TextModule()),
        ("item_graph", ImageModule()),
    ]:
        try:
            setattr(bm, attr, module)
        except ValueError:
            pass
        else:
            raise AssertionError(
                "setting %s to %s should raise ValueError"
                % (attr, type(module).__name__))
Example #11
0
    def test_from_splits(self):
        """from_splits() validates its inputs and builds a proper BaseMethod."""
        data = Reader().read('./tests/data.txt')

        # Each invalid configuration must raise ValueError; assertRaises
        # fails the test when no exception occurs (the original
        # try/except-assert-True pattern never failed).
        with self.assertRaises(ValueError):
            BaseMethod.from_splits(train_data=None, test_data=None)

        with self.assertRaises(ValueError):
            BaseMethod.from_splits(train_data=data, test_data=None)

        bm = BaseMethod.from_splits(train_data=data, test_data=data)
        self.assertEqual(bm.total_users, 10)
        self.assertEqual(bm.total_items, 10)

        bm = BaseMethod.from_splits(train_data=data, test_data=data,
                                    val_data=data, verbose=True)
        self.assertEqual(bm.total_users, 10)
        self.assertEqual(bm.total_items, 10)
Example #12
0
def test_from_splits():
    """from_splits() validates its inputs and builds a proper BaseMethod."""
    data = reader.read_uir('./tests/data.txt')

    # Each invalid configuration must raise; fail explicitly when it does
    # not (the original try/except-assert-True pattern passed silently).
    try:
        BaseMethod.from_splits(train_data=None, test_data=None)
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError for train_data=None")

    try:
        BaseMethod.from_splits(train_data=data, test_data=None)
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError for test_data=None")

    bm = BaseMethod.from_splits(train_data=data, test_data=data)
    assert bm.total_users == 10
    assert bm.total_items == 10

    bm = BaseMethod.from_splits(train_data=data,
                                test_data=data,
                                val_data=data,
                                verbose=True)
    assert bm.total_users == 10
    assert bm.total_items == 10
Example #13
0
 def test_init(self):
     """A freshly constructed BaseMethod keeps its default configuration."""
     bm = BaseMethod(None, verbose=True)
     self.assertEqual(bm.rating_threshold, 1.0)
     self.assertFalse(bm.exclude_unknowns)
Example #14
0
 def test_evaluate(self):
     """evaluate() runs end-to-end and the result renders as a string."""
     data = Reader().read('./tests/data.txt')
     bm = BaseMethod.from_splits(train_data=data, test_data=data)
     model = MF(k=1, max_iter=0)
     result = bm.evaluate(model, metrics=[MAE()], user_based=False)
     # str() is the idiomatic spelling of result.__str__(); also verify
     # the rendering actually produces a string.
     assert isinstance(str(result), str)
Example #15
0
# Shuffle in place, then hold out the first 20% of the shuffled data as test.
random.shuffle(data)

# Compute the split point once (it was previously evaluated twice).
n_test = math.ceil(0.2 * len(data))
train = data[n_test:]
test = data[:n_test]

# Separate probe file used as the validation split.
holdout = cornac.data.Reader(bin_threshold=1.0).read(
    fpath='./cs608_ip_probe_v2.csv', sep=",", fmt='UIR', skip_lines=1)

ratio_split = cornac.eval_methods.RatioSplit(data=train,
                                             test_size=0.2,
                                             rating_threshold=0.5,
                                             seed=123)

eval_method = BaseMethod.from_splits(train_data=train,
                                     test_data=test,
                                     val_data=holdout,
                                     exclude_unknowns=True,
                                     verbose=True,
                                     seed=123)

cv = cornac.eval_methods.cross_validation.CrossValidation(
    data,
    n_folds=3,
    rating_threshold=1.0,
    seed=123,
    exclude_unknowns=True,
    verbose=True)

# In[24]:

mae = cornac.metrics.MAE()
rmse = cornac.metrics.RMSE()
Example #16
0
from cornac.data import Reader
from cornac.eval_methods import BaseMethod
from cornac.models import MF
from cornac.metrics import MAE, RMSE
from cornac.utils import cache

# Fetch the MovieLens 100K u1 train/test split (downloads are cached locally).
reader = Reader()
train_data = reader.read(cache(
    url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base'))
test_data = reader.read(cache(
    url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test'))

# Evaluate on the provided split; keep unknown users/items in the test set.
eval_method = BaseMethod.from_splits(
    train_data=train_data,
    test_data=test_data,
    exclude_unknowns=False,
    verbose=True,
)

# Biased matrix factorization with early stopping.
mf = MF(
    k=10,
    max_iter=25,
    learning_rate=0.01,
    lambda_reg=0.02,
    use_bias=True,
    early_stop=True,
    verbose=True,
)

# Run the evaluation and report per-user averaged metrics.
result = eval_method.evaluate(model=mf,
                              metrics=[MAE(), RMSE()],
                              user_based=True)
print(result)
Example #17
0
args = parse_arguments()

os.makedirs(args.out, exist_ok=True)

# Load train/test interactions plus aspect:opinion:sentiment annotations.
reader = Reader()
train_data = reader.read(os.path.join(args.indir, "train.txt"), sep=",")
test_data = reader.read(os.path.join(args.indir, "test.txt"), sep=",")
sentiment = reader.read(
    os.path.join(args.indir, "sentiment.txt"),
    fmt="UITup",
    sep=",",
    tup_sep=":",
)

# Wrap the sentiment data and build the evaluation method over the
# provided splits, dropping unknown users/items from the test set.
md = SentimentModality(data=sentiment)
eval_method = BaseMethod.from_splits(train_data=train_data,
                                     test_data=test_data,
                                     sentiment=md,
                                     exclude_unknowns=True,
                                     verbose=args.verbose)

mter = cornac.models.MTER(
    n_user_factors=args.user_factors,
    n_item_factors=args.item_factors,
    n_aspect_factors=args.aspect_factors,
    n_opinion_factors=args.opinion_factors,
    n_bpr_samples=args.bpr_samples,
    n_element_samples=args.element_samples,
    lambda_reg=args.lambda_reg,
    lambda_bpr=args.lambda_bpr,
    max_iter=args.epoch,
    lr=args.learning_rate,
Example #18
0
    def test_with_modalities(self):
        """Modality setters accept matching types and reject mismatches."""
        bm = BaseMethod()

        # All modalities start unset.
        self.assertIsNone(bm.user_text)
        self.assertIsNone(bm.item_text)
        self.assertIsNone(bm.user_image)
        self.assertIsNone(bm.item_image)
        self.assertIsNone(bm.user_graph)
        self.assertIsNone(bm.item_graph)

        bm.user_text = TextModality()
        bm.item_image = ImageModality()
        bm._build_modalities()

        # Each type-mismatched assignment must raise ValueError; assertRaises
        # fails the test when no exception occurs (the original
        # try/except-assert-True pattern never failed).
        for attr, modality in [
            ("user_text", ImageModality()),
            ("item_text", ImageModality()),
            ("user_image", TextModality()),
            ("item_image", TextModality()),
            ("user_graph", TextModality()),
            ("item_graph", ImageModality()),
        ]:
            with self.assertRaises(ValueError):
                setattr(bm, attr, modality)
def test_init():
    """A freshly constructed BaseMethod keeps its default configuration."""
    bm = BaseMethod(None, verbose=True)

    assert bm.rating_threshold == 1.0
    assert not bm.exclude_unknowns