"""Calculate inverse relative frequency weighting.""" # Calculate in-degree, i.e. number of incoming edges inv, cnt = torch.unique(idx, return_counts=True, return_inverse=True)[1:] return cnt[inv].float().reciprocal() class InverseInDegreeEdgeWeighting(EdgeWeighting): """Normalize messages by inverse in-degree.""" def forward(self, source: torch.LongTensor, target: torch.LongTensor) -> torch.FloatTensor: # noqa: D102 return _inverse_frequency_weighting(idx=target) class InverseOutDegreeEdgeWeighting(EdgeWeighting): """Normalize messages by inverse out-degree.""" def forward(self, source: torch.LongTensor, target: torch.LongTensor) -> torch.FloatTensor: # noqa: D102 return _inverse_frequency_weighting(idx=source) class SymmetricEdgeWeighting(EdgeWeighting): """Normalize messages by product of inverse sqrt of in-degree and out-degree.""" def forward(self, source: torch.LongTensor, target: torch.LongTensor) -> torch.FloatTensor: # noqa: D102 return (_inverse_frequency_weighting(idx=source) * _inverse_frequency_weighting(idx=target)).sqrt() edge_weight_resolver = Resolver.from_subclasses(base=EdgeWeighting, default=SymmetricEdgeWeighting)
        self.entity_shape = base.entity_shape
        self.relation_shape = base.relation_shape
        self.tail_entity_shape = base.tail_entity_shape

        # The parameters of the affine transformation: bias
        self.bias = nn.Parameter(torch.empty(size=tuple()), requires_grad=trainable_bias)
        self.initial_bias = torch.as_tensor(data=[initial_bias], dtype=torch.get_default_dtype())

        # scale. We model this as log(scale) to ensure scale > 0, and thus monotonicity
        self.log_scale = nn.Parameter(torch.empty(size=tuple()), requires_grad=trainable_scale)
        self.initial_log_scale = torch.as_tensor(data=[math.log(initial_scale)], dtype=torch.get_default_dtype())

    def reset_parameters(self):  # noqa: D102
        self.bias.data = self.initial_bias.to(device=self.bias.device)
        self.log_scale.data = self.initial_log_scale.to(device=self.log_scale.device)

    def forward(
        self,
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
    ) -> torch.FloatTensor:  # noqa: D102
        return self.log_scale.exp() * self.base(h=h, r=r, t=t) + self.bias


interaction_resolver = Resolver.from_subclasses(
    Interaction,  # type: ignore
    skip={TranslationalInteraction, FunctionalInteraction, MonotonicAffineTransformationInteraction},
    suffix=Interaction.__name__,
)
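
# Hedged sketch (toy numbers, not part of the original module): because the
# scale is stored as log(scale), the effective scale exp(log_scale) is always
# positive, so the affine transformation is strictly increasing and preserves
# the ordering of the base interaction's scores.
def _example_monotonicity():
    import torch

    scores = torch.tensor([0.5, -1.0, 2.0])
    log_scale, bias = torch.tensor(0.3), torch.tensor(-1.2)
    transformed = log_scale.exp() * scores + bias
    # the ranking induced by the scores is unchanged
    assert torch.equal(scores.argsort(), transformed.argsort())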
__all__ = [
    "evaluate",
    "Evaluator",
    "MetricResults",
    "RankBasedEvaluator",
    "RankBasedMetricResults",
    "ClassificationEvaluator",
    "ClassificationMetricResults",
    "evaluator_resolver",
    "metric_resolver",
    "get_metric_list",
]

evaluator_resolver = Resolver.from_subclasses(
    base=Evaluator,  # type: ignore
    default=RankBasedEvaluator,
)

_METRICS_SUFFIX = "MetricResults"
_METRICS: Set[Type[MetricResults]] = {
    RankBasedMetricResults,
    ClassificationMetricResults,
}
metric_resolver = Resolver(
    _METRICS,
    suffix=_METRICS_SUFFIX,
    base=MetricResults,
)


def get_metric_list():
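    # Hedged usage sketch, kept as a comment so the function body that follows
    # is not interrupted: class-resolver normalizes queries by lowercasing and
    # stripping the configured suffix, so e.g.
    #
    #     evaluator = evaluator_resolver.make("rankbased")       # -> RankBasedEvaluator
    #     metric_cls = metric_resolver.lookup("classification")  # -> ClassificationMetricResults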
    'RESCAL',
    'RGCN',
    'RotatE',
    'SimplE',
    'StructuredEmbedding',
    'TransD',
    'TransE',
    'TransH',
    'TransR',
    'TuckER',
    'UnstructuredModel',
    # Utils
    'model_resolver',
    'make_model',
    'make_model_cls',
]

model_resolver = Resolver.from_subclasses(
    base=Model,
    skip={
        _NewAbstractModel,
        # We might be able to relax this later
        ERModel,
        LiteralModel,
        # Old style models should never be looked up
        _OldAbstractModel,
        EntityEmbeddingModel,
        EntityRelationEmbeddingModel,
    },
)
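
# Hedged usage sketch (not part of the original module): queries are
# normalized before lookup, so differently-cased names should resolve to the
# same class.
def _example_model_lookup():
    assert model_resolver.lookup("TransE") is model_resolver.lookup("transe")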
"PomBaseGetter", "PubChemCompoundGetter", "RGDGetter", "ReactomeGetter", "RheaGetter", "SCHEMGetter", "SCOMPGetter", "SDISGetter", "SFAMGetter", "SwissLipidsGetter", "UMLSGetter", "UniProtGetter", "UniProtPtmGetter", "WikiPathwaysGetter", "ZFINGetter", "ontology_resolver", ] def _assert_sorted(): _sorted = sorted(__all__) if _sorted != __all__: raise ValueError(f"unsorted. should be:\n{_sorted}") _assert_sorted() del _assert_sorted ontology_resolver: Resolver[Obo] = Resolver.from_subclasses(base=Obo, suffix="Getter")
    def test_make_many(self):
        """Test the make_many function."""
        with self.assertRaises(ValueError):
            # no default is given
            self.resolver.make_many(None)
        with self.assertRaises(ValueError):
            # wrong number of kwargs is given
            self.resolver.make_many([], [{}, {}])
        with self.assertRaises(ValueError):
            # wrong number of kwargs is given
            self.resolver.make_many(["a", "a", "a"], [{}, {}])

        # One class, one kwargs dict
        instances = self.resolver.make_many("a", dict(name="name"))
        self.assertEqual([A(name="name")], instances)
        instances = self.resolver.make_many("a", [dict(name="name")])
        self.assertEqual([A(name="name")], instances)
        instances = self.resolver.make_many(["a"], dict(name="name"))
        self.assertEqual([A(name="name")], instances)
        instances = self.resolver.make_many(["a"], [dict(name="name")])
        self.assertEqual([A(name="name")], instances)

        # One class, multiple kwargs dicts
        instances = self.resolver.make_many("a", [dict(name="name1"), dict(name="name2")])
        self.assertEqual([A(name="name1"), A(name="name2")], instances)
        instances = self.resolver.make_many(["a"], [dict(name="name1"), dict(name="name2")])
        self.assertEqual([A(name="name1"), A(name="name2")], instances)

        # Multiple classes, one kwargs dict
        instances = self.resolver.make_many(["a", "b", "c"], dict(name="name"))
        self.assertEqual([A(name="name"), B(name="name"), C(name="name")], instances)
        instances = self.resolver.make_many(["a", "b", "c"], [dict(name="name")])
        self.assertEqual([A(name="name"), B(name="name"), C(name="name")], instances)

        # Multiple classes, multiple kwargs dicts
        instances = self.resolver.make_many(
            ["a", "b", "c"],
            [dict(name="name1"), dict(name="name2"), dict(name="name3")],
        )
        self.assertEqual([A(name="name1"), B(name="name2"), C(name="name3")], instances)

        # One class, no kwargs
        instances = self.resolver.make_many("e")
        self.assertEqual([E()], instances)
        instances = self.resolver.make_many(["e"])
        self.assertEqual([E()], instances)
        instances = self.resolver.make_many("e", None)
        self.assertEqual([E()], instances)
        instances = self.resolver.make_many(["e"], None)
        self.assertEqual([E()], instances)
        instances = self.resolver.make_many(["e"], [None])
        self.assertEqual([E()], instances)

        # No class (falls back to the default)
        resolver = Resolver.from_subclasses(Base, default=A)
        instances = resolver.make_many(None, dict(name="name"))
        self.assertEqual([A(name="name")], instances)
        # other relations
        for r in range(self.num_relations):
            source_r, target_r, weights_r = _reduce_relation_specific(
                relation=r,
                source=source,
                target=target,
                edge_type=edge_type,
                edge_weights=edge_weights,
            )

            # skip relations without edges
            if source_r is None:
                continue

            # compute message, shape: (num_edges_of_type, num_blocks, block_size)
            uniq_source_r, inv_source_r = source_r.unique(return_inverse=True)
            w_r = self.blocks[r]
            m = torch.einsum('nbi,bij->nbj', x[uniq_source_r], w_r).index_select(dim=0, index=inv_source_r)

            # optional message weighting
            if weights_r is not None:
                m = m * weights_r.unsqueeze(dim=1).unsqueeze(dim=2)

            # message aggregation
            out.index_add_(dim=0, index=target_r, source=m)

        return out.reshape(-1, self.output_dim)


decomposition_resolver = Resolver.from_subclasses(base=Decomposition, default=BasesDecomposition)
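
# Hedged toy sketch (not part of the original module) of the block-diagonal
# message computation above: the einsum multiplies each (block_size,)-slice
# of the node representations with its own (block_size, block_size) weight
# block, which is equivalent to a block-diagonal weight matrix.
def _example_block_einsum():
    import torch

    num_nodes, num_blocks, block_size = 5, 2, 3
    x = torch.randn(num_nodes, num_blocks, block_size)
    w = torch.randn(num_blocks, block_size, block_size)
    m = torch.einsum('nbi,bij->nbj', x, w)
    assert m.shape == (num_nodes, num_blocks, block_size)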
        .. seealso:: https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/blob/master/codes/model.py
        """
        # -w * log sigma(-(m + n)) - log sigma(m + p)
        # p >> -m => m + p >> 0 => sigma(m + p) ~= 1 => log sigma(m + p) ~= 0 => -log sigma(m + p) ~= 0
        # p << -m => m + p << 0 => sigma(m + p) ~= 0 => log sigma(m + p) << 0 => -log sigma(m + p) >> 0
        neg_loss = functional.logsigmoid(-neg_scores - self.margin)
        neg_loss = neg_weights * neg_loss
        neg_loss = self._reduction_method(neg_loss)

        pos_loss = functional.logsigmoid(self.margin + pos_scores)
        pos_loss = self._reduction_method(pos_loss)

        loss = -pos_loss - neg_loss
        if self._reduction_method is torch.mean:
            loss = loss / 2.0

        return loss


loss_resolver = Resolver.from_subclasses(
    Loss,
    default=MarginRankingLoss,
    skip={
        PairwiseLoss,
        PointwiseLoss,
        SetwiseLoss,
    },
)

# register each loss class's synonyms with the resolver
for _name, _cls in loss_resolver.lookup_dict.items():
    for _synonym in _cls.synonyms or []:
        loss_resolver.synonyms[_synonym] = _cls
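
# Hedged numeric sanity check (toy values, not part of the original module)
# for the sign analysis in the comments above: the positive-score term
# -log sigma(m + p) vanishes when p >> -m and grows large when p << -m.
def _example_nssa_terms():
    import torch
    from torch.nn import functional

    m = 1.0
    print(-functional.logsigmoid(torch.tensor(m + 10.0)))  # ~= 0
    print(-functional.logsigmoid(torch.tensor(m - 10.0)))  # ~= 9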