Example #1
 def input_types(self):
     """Returns definitions of module input ports.
         logit_intent_status: Output of SGD model
         intent_status: intent label
         logit_req_slot_status: Output of SGD model
         requested_slot_status: Takes value 1 if the corresponding slot is requested, 0 otherwise
         logit_cat_slot_status: Output of SGD model
         categorical_slot_status: The status of each categorical slot in the service
         logit_cat_slot_value_status: Output of SGD model
         categorical_slot_value_status: Takes value 1 if the corresponding slot value is correct, 0 otherwise
         logit_noncat_slot_status: Output of SGD model
         noncategorical_slot_status: The status of each noncategorical slot in the service
         logit_spans: Output of SGD model
         noncategorical_slot_value_start: The index of the starting subword corresponding to the slot span for a non-categorical slot value
         noncategorical_slot_value_end: The index of the ending (inclusive) subword corresponding to the slot span for a non-categorical slot value
         task_mask: Mask contains 1 if it's the current task, 0 otherwise
     """
     return {
         "logit_intent_status": NeuralType(('B', 'T'), LogitsType()),
         "intent_status": NeuralType(('B'), LabelsType()),
         "logit_req_slot_status": NeuralType(('B', 'T'), LogitsType()),
         "requested_slot_status": NeuralType(('B'), LabelsType()),
         "logit_cat_slot_status": NeuralType(('B', 'T'), LogitsType()),
         "categorical_slot_status": NeuralType(('B'), LabelsType()),
         "logit_cat_slot_value_status": NeuralType(('B', 'T'), LogitsType()),
         "categorical_slot_value_status": NeuralType(('B'), LabelsType()),
         "logit_noncat_slot_status": NeuralType(('B', 'T'), LogitsType()),
         "noncategorical_slot_status": NeuralType(('B'), LabelsType()),
         "logit_spans": NeuralType(('B', 'T', 'D'), LogitsType()),
         "noncategorical_slot_value_start": NeuralType(('B'), LabelsType()),
         "noncategorical_slot_value_end": NeuralType(('B'), LabelsType()),
         "task_mask": NeuralType(('B', 'T'), ChannelType()),
     }
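The input_types / output_types dictionaries above are consumed by NeMo's type-checking machinery. Below is a minimal sketch of how such a port definition is typically attached to a typed module, assuming NeMo 1.x's nemo.core API; the ToyProjection class and its port names are illustrative and not taken from any example in this listing.

import torch
from nemo.core.classes import NeuralModule, typecheck
from nemo.core.neural_types import ChannelType, LogitsType, NeuralType


class ToyProjection(NeuralModule):
    """Hypothetical module: projects hidden states to per-token logits."""

    def __init__(self, hidden_size: int, num_classes: int):
        super().__init__()
        self.linear = torch.nn.Linear(hidden_size, num_classes)

    @property
    def input_types(self):
        # (batch, time, hidden) hidden states
        return {"hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}

    @property
    def output_types(self):
        # (batch, time, classes) logits
        return {"logits": NeuralType(('B', 'T', 'C'), LogitsType())}

    @typecheck()
    def forward(self, hidden_states):
        return self.linear(hidden_states)


# Typed calls go through keyword arguments; the decorator validates the
# inputs against input_types and tags the output with the declared type.
model = ToyProjection(hidden_size=16, num_classes=4)
logits = model(hidden_states=torch.randn(2, 8, 16))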
Example #2
 def output_ports(self):
     return {
         "mod_out":
         NeuralType(
             (AxisType(AxisKind.Batch), AxisType(AxisKind.Dimension, 1)),
             ChannelType())
     }
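Most examples in this listing use the string shorthand ('B', 'T', ...) for axes, while this one spells out AxisType objects. The sketch below illustrates the assumed equivalence of the two spellings, inferred from examples #2, #7, and #12 rather than stated anywhere in the listing; the fixed size of 1 on the Dimension axis above is dropped so that both sides carry an unspecified size.

from nemo.core.neural_types import (
    AxisKind,
    AxisType,
    ChannelType,
    NeuralType,
    NeuralTypeComparisonResult,
)

# Explicit axes, as in the example above (size left unspecified here).
explicit = NeuralType(
    (AxisType(AxisKind.Batch), AxisType(AxisKind.Dimension)), ChannelType()
)
# String shorthand used throughout the rest of the listing.
shorthand = NeuralType(('B', 'D'), ChannelType())

# Same axis kinds and element type, so the comparison is expected to be SAME.
assert explicit.compare(shorthand) == NeuralTypeComparisonResult.SAME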
Example #3
            def output_ports(self):
                """Returns definitions of module output ports.

                Returns:
                  A (dict) of module's output ports names to NeuralTypes mapping
                """
                return {"y_pred": NeuralType(('B', 'D'), ChannelType())}
Example #4
    def output_ports(self):
        """Returns definitions of module output ports.

        src_ids: ids of input sequences

        src_lens: lengths of input sequences

        tgt_ids: labels for the generator output

        tgt_lens: lengths of the generator targets

        gating_labels: labels for the gating head

        turn_domain: list of the domains
            NeuralType(None)

        """
        return {
            # "src_ids": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)}),
            # "src_lens": NeuralType({0: AxisType(BatchTag)}),
            # "tgt_ids": NeuralType({0: AxisType(BatchTag), 1: AxisType(ChannelTag), 2: AxisType(TimeTag)}),
            # "tgt_lens": NeuralType({0: AxisType(BatchTag), 1: AxisType(ChannelTag)}),
            # "gating_labels": NeuralType({0: AxisType(BatchTag), 1: AxisType(ChannelTag)}),
            # "turn_domain": NeuralType(None),
            "src_ids": NeuralType(('B', 'T'), ChannelType()),
            "src_lens": NeuralType(tuple('B'), LengthsType()),
            "tgt_ids": NeuralType(('B', 'D', 'T'), LabelsType()),
            "tgt_lens": NeuralType(('B', 'D'), LengthsType()),
            "gating_labels": NeuralType(('B', 'D'), LabelsType()),
            "turn_domain": NeuralType(),
        }
Example #5
 def output_types(self) -> Optional[Dict[str, NeuralType]]:
     """Returns definitions of module output ports.
            """
     return {
         'input_ids':
         NeuralType(('B', 'T'), ChannelType()),
         'segment_ids':
         NeuralType(('B', 'T'), ChannelType()),
         'input_mask':
         NeuralType(('B', 'T'), MaskType()),
         "labels":
         NeuralType(
             tuple('B'),
             RegressionValuesType()
             if self.task_name == 'sts-b' else CategoricalValuesType()),
     }
Example #6
 def input_ports(self):
     """Returns definitions of module input ports.
     """
     return {
         # "interpolated_image": NeuralType(
         #     {
         #         0: AxisType(BatchTag),
         #         1: AxisType(ChannelTag),
         #         2: AxisType(HeightTag, 28),
         #         3: AxisType(WidthTag, 28),
         #     }
         # ),
         # "interpolated_decision": NeuralType({0: AxisType(BatchTag), 1: AxisType(ChannelTag, 1)}),
         "interpolated_image": NeuralType(('B', 'C', 'H', 'W'),
                                          ChannelType()),
         "interpolated_decision": NeuralType(('B', 'C'), ChannelType()),
     }
Example #7
 def output_ports(self):
     # return {"mod_out": NeuralType({0: AxisType(BatchTag), 1: AxisType(BaseTag, dim=1)})}
     return {
         "mod_out":
         NeuralType(
             (AxisType(AxisKind.Batch), AxisType(AxisKind.Dimension, 1)),
             ChannelType())
     }
Example #8
    def output_types(self) -> Optional[Dict[str, NeuralType]]:
        """
        Returns definitions of module output ports.
        """

        return {
            "logits": NeuralType(('B', 'T'), LogitsType()),
            'hidden_states': NeuralType(('B', 'T', 'C'), ChannelType()),
        }
Example #9
    def input_ports(self):
        """Return definitions of module input ports.

        Returns:
            Module input ports.
        """
        return {
            "encoding": NeuralType(('B', 'C', 'H', 'W'), ChannelType()),
        }
Example #10
    def test_big_void(self):
        big_void_1 = NeuralType(elements_type=VoidType())
        big_void_2 = NeuralType()

        btc_spctr = NeuralType(('B', 'T', 'C'), SpectrogramType())
        btc_spct_bad = NeuralType(('B', 'T'), SpectrogramType())
        t1 = NeuralType(
            axes=(
                AxisType(kind=AxisKind.Batch, size=None, is_list=True),
                AxisType(kind=AxisKind.Time, size=None, is_list=True),
                AxisType(kind=AxisKind.Dimension, size=32, is_list=False),
                AxisType(kind=AxisKind.Dimension, size=128, is_list=False),
                AxisType(kind=AxisKind.Dimension, size=256, is_list=False),
            ),
            elements_type=ChannelType(),
        )
        t2 = NeuralType(
            axes=(
                AxisType(kind=AxisKind.Batch, size=None, is_list=False),
                AxisType(kind=AxisKind.Time, size=None, is_list=False),
                AxisType(kind=AxisKind.Dimension, size=32, is_list=False),
                AxisType(kind=AxisKind.Dimension, size=128, is_list=False),
                AxisType(kind=AxisKind.Dimension, size=256, is_list=False),
            ),
            elements_type=ChannelType(),
        )

        self.assertEqual(big_void_1.compare(btc_spctr),
                         NeuralTypeComparisonResult.SAME)
        self.assertEqual(big_void_1.compare(btc_spct_bad),
                         NeuralTypeComparisonResult.SAME)
        self.assertEqual(big_void_1.compare(t1),
                         NeuralTypeComparisonResult.SAME)
        self.assertEqual(big_void_1.compare(t2),
                         NeuralTypeComparisonResult.SAME)

        self.assertEqual(big_void_2.compare(btc_spctr),
                         NeuralTypeComparisonResult.SAME)
        self.assertEqual(big_void_2.compare(btc_spct_bad),
                         NeuralTypeComparisonResult.SAME)
        self.assertEqual(big_void_2.compare(t1),
                         NeuralTypeComparisonResult.SAME)
        self.assertEqual(big_void_2.compare(t2),
                         NeuralTypeComparisonResult.SAME)
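As the assertions above show, a NeuralType with no axes and a VoidType (or omitted) element type compares as SAME against any concrete tensor type. A short sketch of the same idea, which is presumably why ports such as "turn_domain" and "use_cache" elsewhere in this listing can be declared without axes:

from nemo.core.neural_types import (
    LabelsType,
    NeuralType,
    NeuralTypeComparisonResult,
    VoidType,
)

wildcard = NeuralType(elements_type=VoidType())   # same as big_void_1 above
labels = NeuralType(('B', 'T'), LabelsType())

# The axes-less void type accepts any concrete tensor type.
assert wildcard.compare(labels) == NeuralTypeComparisonResult.SAME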
Example #11
File: search.py Project: benhoff/NeMo
 def input_ports(self):
     """Returns definitions of module input ports.
     """
     return {
         # 'encoder_outputs': NeuralType(
         #     {0: AxisType(BatchTag), 1: AxisType(TimeTag), 2: AxisType(ChannelTag),}, optional=True,
         # )
         "encoder_outputs":
         NeuralType(('B', 'T', 'D'), ChannelType(), optional=True)
     }
Example #12
    def input_ports(self):
        """Returns definitions of module input ports.

        decision:
            0: AxisType(BatchTag)

            1: AxisType(ChannelTag, 1)
        """
        return {
            # "decision": NeuralType({0: AxisType(BatchTag), 1: AxisType(ChannelTag, 1)}),
            "decision": NeuralType(('B', 'D'), ChannelType())
        }
Example #13
    def test_infer_errors(self):

        data_source = nemo.backends.pytorch.common.ZerosDataLayer(
            size=1,
            dtype=torch.FloatTensor,
            batch_size=1,
            output_ports={
                "dl_out":
                NeuralType((AxisType(
                    AxisKind.Batch), AxisType(AxisKind.Dimension, 1)),
                           ChannelType())
            },
        )
        addten = AddsTen()
        minusten = SubtractsTen()

        zero_tensor = data_source()
        ten_tensor = addten(mod_in=zero_tensor)
        twenty_tensor = addten(mod_in=ten_tensor)
        thirty_tensor = addten(mod_in=twenty_tensor)

        with self.assertRaisesRegex(ValueError,
                                    "use_cache was set, but cache was empty"):
            evaluated_tensors = self.nf.infer(
                tensors=[twenty_tensor, thirty_tensor],
                verbose=False,
                use_cache=True)

        new_ten_tensor = minusten(mod_in=twenty_tensor)
        evaluated_tensors = self.nf.infer(tensors=[new_ten_tensor],
                                          verbose=False,
                                          cache=True)

        with self.assertRaisesRegex(ValueError,
                                    "cache was set but was not empty"):
            evaluated_tensors = self.nf.infer(
                tensors=[twenty_tensor, thirty_tensor],
                verbose=False,
                cache=True)

        self.nf.clear_cache()
        evaluated_tensors = self.nf.infer(tensors=[new_ten_tensor],
                                          verbose=False,
                                          cache=True)

        with self.assertRaisesRegex(ValueError,
                                    "cache and use_cache were both set."):
            evaluated_tensors = self.nf.infer(
                tensors=[twenty_tensor, thirty_tensor],
                verbose=False,
                cache=True,
                use_cache=True)
        self.assertEqual(evaluated_tensors[0][0].squeeze().data, 10)
Example #14
 def input_types(self) -> Optional[Dict[str, NeuralType]]:
     return {
         "input_ids": NeuralType(('B', 'T'), ChannelType()),
         "token_type_ids": NeuralType(('B', 'T'),
                                      ChannelType(),
                                      optional=True),
         "attention_mask": NeuralType(('B', 'T'), MaskType(),
                                      optional=True),
         "labels": NeuralType(('B', 'T'), ChannelType(), optional=True),
         'past_key_values':
         [[NeuralType(None, StringType(), optional=True)]],
         'use_cache': NeuralType(None, VoidType(), optional=True),
         'position_ids': NeuralType(('B', 'T'),
                                    ChannelType(),
                                    optional=True),
         "return_dict": NeuralType(None, StringType(), optional=True),
         "output_attentions": NeuralType(None, StringType(), optional=True),
         "output_hidden_states": NeuralType(None,
                                            StringType(),
                                            optional=True),
         "max_length": NeuralType(None, IntType(), optional=True),
     }
Example #15
    def test_call_TaylorNet(self):
        x_tg = NmTensor(
            producer=None,
            producer_args=None,
            output_port_name=None,
            ntype=NeuralType(elements_type=ChannelType(), axes=('B', 'D')),
        )

        tn = TaylorNet(dim=4)
        # note that real port's name: x was used
        y_pred = tn(x=x_tg)
        self.assertEqual(y_pred.producer, tn)
        self.assertEqual(y_pred.producer_args.get("x"), x_tg)
Example #16
    def test_call_TaylorNet(self):
        x_tg = nemo.core.neural_modules.NmTensor(
            producer=None,
            producer_args=None,
            name=None,
            ntype=NeuralType(elements_type=ChannelType(), axes=('B', 'D')),
        )

        tn = nemo.backends.pytorch.tutorials.TaylorNet(dim=4)
        # note that real port's name: x was used
        y_pred = tn(x=x_tg)
        self.assertEqual(y_pred.producer, tn)
        self.assertEqual(y_pred.producer_args.get("x"), x_tg)
Example #17
 def test_list_of_lists(self):
     T1 = NeuralType(
         axes=(
             AxisType(kind=AxisKind.Batch, size=None, is_list=True),
             AxisType(kind=AxisKind.Time, size=None, is_list=True),
             AxisType(kind=AxisKind.Dimension, size=32, is_list=False),
             AxisType(kind=AxisKind.Dimension, size=128, is_list=False),
             AxisType(kind=AxisKind.Dimension, size=256, is_list=False),
         ),
         elements_type=ChannelType(),
     )
     T2 = NeuralType(
         axes=(
             AxisType(kind=AxisKind.Batch, size=None, is_list=False),
             AxisType(kind=AxisKind.Time, size=None, is_list=False),
             AxisType(kind=AxisKind.Dimension, size=32, is_list=False),
             AxisType(kind=AxisKind.Dimension, size=128, is_list=False),
             AxisType(kind=AxisKind.Dimension, size=256, is_list=False),
         ),
         elements_type=ChannelType(),
     )
     # TODO: should this be incompatible instead???
     assert T1.compare(T2) == NeuralTypeComparisonResult.TRANSPOSE_SAME
Example #18
 def input_ports(self):
     """Returns definitions of module input ports.
     """
     return {
         # "latents": NeuralType(
         #     {
         #         0: AxisType(BatchTag),
         #         1: AxisType(ChannelTag, 64),
         #         2: AxisType(HeightTag, 4),
         #         3: AxisType(WidthTag, 4),
         #     }
         # )
         "latents": NeuralType(('B', 'C', 'H', 'W'), ChannelType())
     }
Example #19
 def output_ports(self):
     """Returns definitions of module output ports.
     """
     return {
         # "image": NeuralType(
         #     {
         #         0: AxisType(BatchTag),
         #         1: AxisType(ChannelTag),
         #         2: AxisType(HeightTag, 28),
         #         3: AxisType(WidthTag, 28),
         #     }
         # )
         "image": NeuralType(('B', 'C', 'H', 'W'), ChannelType())
     }
Example #20
File: dataset.py Project: blisc/NeMo
 def output_types(self) -> Optional[Dict[str, NeuralType]]:
     """Returns definitions of module output ports."""
     return {
         "example_id_num": NeuralType(('B', 'T'), ChannelType()),
         "service_id": NeuralType(('B'), ChannelType()),
         "utterance_ids": NeuralType(('B', 'T'), ChannelType()),
         "token_type_ids": NeuralType(('B', 'T'),
                                      ChannelType()),  # utterance segment
         "attention_mask": NeuralType(('B', 'T'),
                                      ChannelType()),  # utterance mask
         "intent_status": NeuralType(('B'), LabelsType()),
         "requested_slot_status": NeuralType(('B'), LabelsType()),
         "categorical_slot_status": NeuralType(('B'), LabelsType()),
         "categorical_slot_value_status": NeuralType(('B'), LabelsType()),
         "noncategorical_slot_status": NeuralType(('B'), LabelsType()),
         "noncategorical_slot_value_start": NeuralType(('B'), LabelsType()),
         "noncategorical_slot_value_end": NeuralType(('B'), LabelsType()),
         "start_char_idx": NeuralType(('B', 'T'), LabelsType()),
         "end_char_idx": NeuralType(('B', 'T'), LabelsType()),
         "task_mask": NeuralType(('B', 'T'), ChannelType()),
     }
Example #21
 def input_types(self) -> Optional[Dict[str, NeuralType]]:
     return {
         "enc_taskname":
         NeuralType(('B', 'T', 'C'), ChannelType(), optional=True),
     }
Example #22
 def output_ports(self):
     """Returns definitions of module output ports.
     hidden_states: output embedding
     """
     return {"hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
Example #23
 def output_ports(self):
     """Returns definitions of module output ports.
     """
     # return {"decision": NeuralType({0: AxisType(BatchTag), 1: AxisType(ChannelTag, 1)})}
     return {"decision": NeuralType(('B', 'C'), ChannelType())}
Example #24
 def output_ports(self):
     """Returns definitions of module output ports."""
     return dict(pred=NeuralType(('B', 'T', 'D'), ChannelType()), len=NeuralType(('B',), LengthsType()))
Example #25
    def output_ports(self):
        """Returns definitions of module output ports.
        example_id_num (int): example ids
        service_id (int): service ids
        is_real_example (bool): flag indicating whether the example is valid
        utterance_ids (int): utterance ids
        utterance_segment (int): Denotes the identity of the sequence. Takes values 0 (system utterance) and 1 (user utterance)
        utterance_mask (int): Mask which takes the value 0 for padded tokens and 1 otherwise
        categorical_slot_status (int): The status of each categorical slot in the service
        cat_slot_status_mask (int): Masks out categorical status for padded categorical slots, takes values 0 and 1
        categorical_slot_values (int): The index of the correct value for each categorical slot
        cat_slot_values_mask (int): Masks out categorical slots values for slots not used in the service, takes values 0 and 1
        noncategorical_slot_status (int): The status of each non-categorical slot in the service
        noncat_slot_status_mask (int): Masks out non-categorical status for padded non-categorical slots, takes values 0 and 1
        noncategorical_slot_value_start (int): The index of the starting subword corresponding to the slot span for a non-categorical slot value
        noncategorical_slot_value_end (int): The index of the ending (inclusive) subword corresponding to the slot span for a non-categorical slot value
        start_char_idx (int): Start character indices in the original utterance corresponding to the tokens
        end_char_idx (int): Inclusive end character indices in the original utterance corresponding to the tokens
        num_slots (int): Total number of slots present in the service
        requested_slot_status (int): Takes value 1 if the corresponding slot is requested, 0 otherwise
        req_slot_mask (int): Masks requested slots not used for the particular service
        intent_status_mask (long): Masks out padded intents in the service, takes values 0 and 1
        intent_status_labels (int): Intent labels

        """
        return {
            "example_id_num": NeuralType(('B'), ChannelType()),
            "service_id": NeuralType(('B'), ChannelType()),
            "is_real_example": NeuralType(('B'), ChannelType()),
            "utterance_ids": NeuralType(('B', 'T'), ChannelType()),
            "utterance_segment": NeuralType(('B', 'T'), ChannelType()),
            "utterance_mask": NeuralType(('B', 'T'), ChannelType()),
            "categorical_slot_status": NeuralType(('B', 'T'), LabelsType()),
            "cat_slot_status_mask": NeuralType(('B', 'T'), ChannelType()),
            "categorical_slot_values": NeuralType(('B', 'T'), LabelsType()),
            "cat_slot_values_mask": NeuralType(('B', 'T', 'C'), ChannelType()),
            "noncategorical_slot_status": NeuralType(('B', 'T'), LabelsType()),
            "noncat_slot_status_mask": NeuralType(('B', 'T'), ChannelType()),
            "noncategorical_slot_value_start": NeuralType(('B', 'T'),
                                                          LabelsType()),
            "noncategorical_slot_value_end": NeuralType(('B', 'T'),
                                                        LabelsType()),
            "start_char_idx": NeuralType(('B', 'T'), LabelsType()),
            "end_char_idx": NeuralType(('B', 'T'), LabelsType()),
            "num_slots": NeuralType(('B'), LengthsType()),
            "requested_slot_status": NeuralType(('B', 'T'), LabelsType()),
            "req_slot_mask": NeuralType(('B', 'T'), ChannelType()),
            "intent_status_mask": NeuralType(('B', 'T'), ChannelType()),
            "intent_status_labels": NeuralType(('B'), LabelsType()),
        }
Example #26
 def input_ports(self):
     """Returns definitions of module input ports.
     """
     # return {"input_ids": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)})}
     return {"input_ids": NeuralType(('B', 'T'), ChannelType())}
Example #27
 def output_types(self) -> Optional[Dict[str, NeuralType]]:
     return {
         'loss': NeuralType(None, FloatType(), optional=True),
         'hidden_states': NeuralType(('B', 'T', 'D'), ChannelType()),
     }
Example #28
 def input_types(self) -> Optional[Dict[str, NeuralType]]:
     """
     Returns definitions of module input ports.
     We implement it here since all NLP classifiers have the same inputs
     """
     return {"hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
Example #29
 def output_ports(self):
     """Returns definitions of module output ports.
     """
     # return {"hidden_states": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag), 2: AxisType(ChannelTag)})}
     return {"hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
Example #30
 def output_types(self) -> Optional[Dict[str, NeuralType]]:
     return {"output_embeds": NeuralType(('B', 'T', 'C'), ChannelType())}