def test_multitask_custom_attachments(self):
    """Attach the task heads at user-specified layers"""
    edges = [(0, 1)]
    cards = [2, 2]
    tg = TaskHierarchy(cards, edges)
    em = MTEndModel(
        layer_out_dims=[2, 8, 4],
        task_graph=tg,
        seed=1,
        verbose=False,
        task_head_layers=[1, 2],
    )
    self.assertEqual(em.task_map[1][0], 0)
    self.assertEqual(em.task_map[2][0], 1)
    em.train_model(
        (self.Xs[0], self.Ys[0]),
        valid_data=(self.Xs[1], self.Ys[1]),
        verbose=False,
        n_epochs=10,
        checkpoint=False,
    )
    score = em.score((self.Xs[2], self.Ys[2]), reduce="mean", verbose=False)
    self.assertGreater(score, 0.95)
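# Note on the assertions above: with task_head_layers=[1, 2], task 0's head is
# attached at middle layer 1 and task 1's head at layer 2. Reading the
# assertions, task_map appears to map a layer index to the list of tasks whose
# heads attach there, so em.task_map[1] == [0] and em.task_map[2] == [1].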
def __init__(self, task_graph=None, input_module=None, seed=None, **kwargs):
    defaults = recursive_merge_dicts(
        em_default_config, mt_em_default_config, misses="insert"
    )
    self.config = recursive_merge_dicts(defaults, kwargs)

    # If no task_graph is specified, default to a single binary task
    if task_graph is None:
        task_graph = TaskHierarchy(edges=[], cardinalities=[2])
    self.task_graph = task_graph
    self.K_t = self.task_graph.K_t  # Cardinalities by task
    self.T = self.task_graph.T  # Total number of tasks

    MTClassifier.__init__(self, cardinalities=self.K_t, seed=seed)

    if input_module is None:
        # Use the same config key the callers pass ("layer_out_dims");
        # its first entry is the input dimension
        input_module = IdentityModule(self.config["layer_out_dims"][0])

    self._build(input_module)

    # Show network
    if self.config["verbose"]:
        print("\nNetwork architecture:")
        self._print()
        print()
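# Usage sketch (illustrative, not part of the class above): construct an
# MTEndModel over a two-task hierarchy. Import paths assume metal's public
# exports; the layer sizes and seed are arbitrary.
if __name__ == "__main__":
    from metal.multitask import MTEndModel, TaskHierarchy

    example_tg = TaskHierarchy(cardinalities=[2, 2], edges=[(0, 1)])
    example_em = MTEndModel(
        layer_out_dims=[2, 8, 4], task_graph=example_tg, seed=1, verbose=False
    )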
# Imports below assume metal's public package layout
import logging

from metal.label_model import LabelModel
from metal.multitask import MTLabelModel, TaskHierarchy

logger = logging.getLogger(__name__)


def LabelLearner(cardinalities=2, dependencies=[], **kwargs):
    """A generative factory function for data programming (MeTaL).

    Uses the single-task label model if `cardinalities` is an integer,
    otherwise the multi-task label model.
    """
    if isinstance(cardinalities, int):
        logger.info("Using MeTaL single task label model")
        label_model = LabelModel(k=cardinalities, **kwargs)
    else:
        logger.info("Using MeTaL multi task label model")
        task_graph = TaskHierarchy(cardinalities=cardinalities, edges=dependencies)
        label_model = MTLabelModel(task_graph=task_graph, **kwargs)
    return label_model
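# Usage sketch (illustrative): the factory dispatches on the type of
# `cardinalities`. An int yields a single-task LabelModel; a list yields an
# MTLabelModel over the task graph implied by `dependencies`.
if __name__ == "__main__":
    single_task_lm = LabelLearner(cardinalities=2)
    multi_task_lm = LabelLearner(cardinalities=[2, 2], dependencies=[(0, 1)])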
def test_scoring(self):
    edges = [(0, 1)]
    cards = [2, 2]
    tg = TaskHierarchy(cards, edges)
    em = MTEndModel(layer_out_dims=[2, 8, 4], task_graph=tg, seed=1, verbose=False)
    em.train_model(
        (self.Xs[0], self.Ys[0]),
        valid_data=(self.Xs[1], self.Ys[1]),
        verbose=False,
        n_epochs=3,
        checkpoint=False,
        validation_task=0,
    )
    tasks = [0, 1]
    for metric in METRICS:
        all_scores = em.score(
            (self.Xs[2], self.Ys[2]), metric=metric, reduce=None, verbose=False
        )
        task_specific_scores_score_method = [
            em.score(
                (self.Xs[2], self.Ys[2]),
                metric=metric,
                validation_task=task,
                verbose=False,
            )
            for task in tasks
        ]
        task_specific_scores_score_task_method = [
            em.score_task(
                self.Xs[2], self.Ys[2], t=task, metric=metric, verbose=False
            )
            for task in tasks
        ]
        for i in range(len(tasks)):
            # assertEqual treats a third positional argument as the failure
            # message, not another operand, so compare the three scoring
            # paths with two explicit assertions
            self.assertEqual(all_scores[i], task_specific_scores_score_method[i])
            self.assertEqual(
                task_specific_scores_score_method[i],
                task_specific_scores_score_task_method[i],
            )
def test_multitask_custom(self):
    edges = [(0, 1)]
    cards = [2, 2]
    # TaskHierarchy takes cardinalities first, then edges, as in the tests above
    tg = TaskHierarchy(cards, edges)
    em = MTEndModel(
        task_graph=tg,
        seed=1,
        verbose=False,
        dropout=0.0,
        layer_out_dims=[2, 8, 4],
        task_head_layers=[1, 2],
    )
    em.train_model(
        (self.Xs[0], self.Ys[0]),
        valid_data=(self.Xs[1], self.Ys[1]),
        verbose=False,
        n_epochs=10,
    )
    score = em.score((self.Xs[2], self.Ys[2]), reduce="mean", verbose=False)
    self.assertGreater(score, 0.95)