Example #1
    def test_from_feature(self):

        # build a toy feature matrix
        import numpy as np
        F = np.zeros((4, 3))
        F[0] = np.asarray([1, 0, 0])
        F[1] = np.asarray([1, 1, 0])
        F[2] = np.asarray([0, 0, 1])
        F[3] = np.asarray([1, 1, 1])

        # the expected output graph
        s = set()
        s.update([(0, 1, 1.0),
                  (0, 3, 1.0),
                  (1, 0, 1.0),
                  (1, 2, 1.0),
                  (1, 3, 1.0),
                  (2, 1, 1.0),
                  (2, 3, 1.0),
                  (3, 0, 1.0),
                  (3, 1, 1.0),
                  (3, 2, 1.0)])

        # build graph modality from features
        gm = GraphModality.from_feature(features=F, k=2, verbose=False)

        self.assertTrue(isinstance(gm, GraphModality))
        self.assertFalse(gm.raw_data.difference(s))
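
A standalone sketch of the same call outside the test class, reusing the toy feature matrix above; the printed triplets are the k=2 neighbour links the test expects in `s`.

import numpy as np
from cornac.data import GraphModality

# the same toy feature matrix as in the test above
F = np.asarray([[1., 0., 0.],
                [1., 1., 0.],
                [0., 0., 1.],
                [1., 1., 1.]])

# each row keeps links to its k=2 most similar rows
item_graph = GraphModality.from_feature(features=F, k=2, verbose=False)
print(sorted(item_graph.raw_data))  # (node, neighbour, weight) triplets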
Example #2
    def test_build(self):
        data = Reader().read('./tests/graph_data.txt', sep=' ')
        gmd = GraphModality(data=data)

        global_iid_map = OrderedDict()
        for raw_iid, raw_jid, val in data:
            global_iid_map.setdefault(raw_iid, len(global_iid_map))

        gmd.build(id_map=global_iid_map)
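        # build() translates the raw node ids through id_map and assembles the
        # adjacency structure whose sizes and shape are checked below.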

        self.assertEqual(len(gmd.map_rid), 7)
        self.assertEqual(len(gmd.map_cid), 7)
        self.assertEqual(len(gmd.val), 7)
        self.assertEqual(gmd.matrix.shape, (7, 7))

        # building an empty GraphModality (no data, no id_map) is expected to raise
        with self.assertRaises(ValueError):
            GraphModality().build()
Example #3
    def test_get_node_degree(self):
        data = Reader().read('./tests/graph_data.txt', sep=' ')
        gmd = GraphModality(data=data)

        global_iid_map = OrderedDict()
        for raw_iid, raw_jid, val in data:
            global_iid_map.setdefault(raw_iid, len(global_iid_map))

        gmd.build(id_map=global_iid_map)

        degree = gmd.get_node_degree()
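        # Judging from the assertions below, each entry pairs two per-node
        # edge counts, one for each edge direction.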

        self.assertEqual(degree.get(0)[0], 4)
        self.assertEqual(degree.get(0)[1], 1)
        self.assertEqual(degree.get(1)[0], 2)
        self.assertEqual(degree.get(1)[1], 1)
        self.assertEqual(degree.get(5)[0], 0)
        self.assertEqual(degree.get(5)[1], 1)

        degree = gmd.get_node_degree([0, 1], [0, 1])

        self.assertEqual(degree.get(0)[0], 1)
        self.assertEqual(degree.get(0)[1], 0)
        self.assertEqual(degree.get(1)[0], 0)
        self.assertEqual(degree.get(1)[1], 1)
Example #4
    def test_get_train_triplet(self):
        data = Reader().read('./tests/graph_data.txt', sep=' ')
        gmd = GraphModality(data=data)

        global_iid_map = OrderedDict()
        for raw_iid, raw_jid, val in data:
            global_iid_map.setdefault(raw_iid, len(global_iid_map))

        gmd.build(id_map=global_iid_map)
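        # get_train_triplet appears to keep only the stored (row, col, val)
        # triplets whose row and column ids both fall in the given id sets,
        # judging from the expected lengths asserted below.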
        rid, cid, val = gmd.get_train_triplet([0, 1, 2], [0, 1, 2])
        self.assertEqual(len(rid), 3)
        self.assertEqual(len(cid), 3)
        self.assertEqual(len(val), 3)

        rid, cid, val = gmd.get_train_triplet([0, 2], [0, 1])
        self.assertEqual(len(rid), 1)
        self.assertEqual(len(cid), 1)
        self.assertEqual(len(val), 1)
Example #5
"""Fit to and evaluate MCF on the Office Amazon dataset"""

from cornac.data import GraphModality
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac import metrics
from cornac.models import MCF
from cornac.datasets import amazon_office as office

# Load Office ratings and item contexts; see the C2PF paper for details
ratings = office.load_rating()
contexts = office.load_context()

item_graph_modality = GraphModality(data=contexts)

ratio_split = RatioSplit(data=ratings,
                         test_size=0.2,
                         rating_threshold=3.5,
                         exclude_unknowns=True,
                         verbose=True,
                         item_graph=item_graph_modality)

mcf = MCF(k=10, max_iter=40, learning_rate=0.001, verbose=True)

# Evaluation metrics
ndcg = metrics.NDCG(k=-1)
rmse = metrics.RMSE()
rec = metrics.Recall(k=20)
pre = metrics.Precision(k=20)
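
# The snippet stops at the metric definitions; in cornac these pieces are
# typically wired together in an Experiment (see Example #7). A minimal sketch:
Experiment(eval_method=ratio_split,
           models=[mcf],
           metrics=[ndcg, rmse, rec, pre]).run()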
Example #6
from cornac.data import GraphModality
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac import metrics
from cornac.models import CVAECF
from cornac.datasets import filmtrust

# In addition to learning from preference data, CVAECF further leverages users' auxiliary data (a social network in this example).
# The necessary data can be loaded as follows
ratings = filmtrust.load_feedback()
trust = filmtrust.load_trust()

# Instantiate a GraphModality, which makes it convenient to work with graph (network) auxiliary information
# For more details, please refer to the tutorial on how to work with auxiliary data
user_graph_modality = GraphModality(data=trust)

# Define an evaluation method to split feedback into train and test sets
ratio_split = RatioSplit(
    data=ratings,
    test_size=0.2,
    rating_threshold=2.5,
    exclude_unknowns=True,
    verbose=True,
    user_graph=user_graph_modality,
    seed=123,
)

# Instantiate CVAECF model
cvaecf = CVAECF(z_dim=20,
                h_dim=20,
Example #7
"""Example for Social Bayesian Personalized Ranking with Epinions dataset"""

import cornac
from cornac.data import Reader, GraphModality
from cornac.datasets import epinions
from cornac.eval_methods import RatioSplit

ratio_split = RatioSplit(data=epinions.load_data(Reader(bin_threshold=4.0)),
                         test_size=0.1,
                         rating_threshold=0.5,
                         exclude_unknowns=True,
                         verbose=True,
                         user_graph=GraphModality(data=epinions.load_trust()))

sbpr = cornac.models.SBPR(k=10,
                          max_iter=50,
                          learning_rate=0.001,
                          lambda_u=0.015,
                          lambda_v=0.025,
                          lambda_b=0.01,
                          verbose=True)
rec_10 = cornac.metrics.Recall(k=10)

cornac.Experiment(eval_method=ratio_split, models=[sbpr],
                  metrics=[rec_10]).run()
Example #8
from cornac.data import GraphModality
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac import metrics
from cornac.models import MCF
from cornac.datasets import amazon_office as office

# MCF leverages relationships among items by jointly factorizing the user-item and item-item matrices
# The necessary data can be loaded as follows
ratings = office.load_feedback()
item_net = office.load_graph()

# Instantiate a GraphModality, which makes it convenient to work with graph (network) auxiliary information
# For more details, please refer to the tutorial on how to work with auxiliary data
item_graph_modality = GraphModality(data=item_net)

# Define an evaluation method to split feedback into train and test sets
ratio_split = RatioSplit(data=ratings,
                         test_size=0.2, rating_threshold=3.5,
                         exclude_unknowns=True, verbose=True,
                         item_graph=item_graph_modality)

# Instantiate MCF
mcf = MCF(k=10, max_iter=40, learning_rate=0.001, verbose=True)

# Evaluation metrics
ndcg = metrics.NDCG(k=-1)
rmse = metrics.RMSE()
rec = metrics.Recall(k=20)
pre = metrics.Precision(k=20)
Example #9
    def test_init(self):
        data = Reader().read('./tests/graph_data.txt', sep=' ')
        gmd = GraphModality(data=data)

        self.assertEqual(len(gmd.raw_data), 7)
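
For reference, `raw_data` holds the parsed edge triplets, so its length equals the number of edges read from `graph_data.txt`. A minimal sketch of the same construction from an in-memory list, using hypothetical node ids rather than the test file:

from cornac.data import GraphModality

# hypothetical (source_id, target_id, weight) triplets, the same shape Reader().read() produces
edges = [('u1', 'u2', 1.0), ('u2', 'u3', 1.0), ('u1', 'u3', 1.0)]
gm = GraphModality(data=edges)
print(len(gm.raw_data))  # expected: 3, one entry per triplet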