Example 1
        )

    # Output decoder.
    trade_output_decoder = TradeStateUpdateNM(data_desc=data_desc)

    # DPM module.
    rule_based_policy = RuleBasedDPMMultiWOZ(data_dir=abs_data_dir)
    # NLG module.
    template_nlg = TemplateNLGMultiWOZ()

    # Updates dialog history with system utterance.
    sys_utter_history_update = SystemUtteranceHistoryUpdate()

    # Construct the "evaluation" (inference) neural graph by connecting the modules using nmTensors.
    # Note: the passed nmTensors use the same names as in the actual forward pass.
    with NeuralGraph(
            operation_mode=OperationMode.evaluation) as dialog_pipeline:
        # 1.1. User utterance encoder.
        # Bind all the input ports of this module.
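        # (Passing the graph itself as the argument value binds those ports
        # as graph-level input ports of "dialog_pipeline".)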
        dialog_ids, dialog_lens, dialog_history = user_utterance_encoder(
            user_uttr=dialog_pipeline,
            dialog_history=dialog_pipeline,
        )
        # 1.2. TRADE encoder.
        outputs, hidden = trade_encoder(inputs=dialog_ids,
                                        input_lens=dialog_lens)
        # 1.3. TRADE generator.
        point_outputs, gate_outputs = trade_decoder(
            encoder_hidden=hidden,
            encoder_outputs=outputs,
            dialog_ids=dialog_ids,
            dialog_lens=dialog_lens,
Example 2
    def __init__(
        self,
        preprocessor_params: Dict,
        encoder_params: Dict,
        decoder_params: Dict,
        spec_augment_params: Optional[Dict] = None,
    ):
        super().__init__()
        # Instantiate necessary modules
        self.__vocabulary = None
        preprocessor, spec_augmentation, encoder, decoder = self.__instantiate_modules(
            preprocessor_params, encoder_params, decoder_params,
            spec_augment_params)
        self._operation_mode = OperationMode.training

        # self.__training_neural_graph = NeuralGraph(operation_mode=OperationMode.training)
        self.__training_neural_graph = NeuralGraph(
            operation_mode=OperationMode.both)
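        # Note: OperationMode.both is used (instead of pure training mode) so
        # that this graph can later be nested into either a training or an
        # evaluation graph - see the nesting rules exercised in Example 4.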
        with self.__training_neural_graph:
            # Copy the input port definitions - using "user" port names.
            self.__training_neural_graph.inputs[
                "input_signal"] = preprocessor.input_ports["input_signal"]
            self.__training_neural_graph.inputs[
                "length"] = preprocessor.input_ports["length"]
            # Bind the selected inputs and connect the modules.
            i_processed_signal, i_processed_signal_len = preprocessor(
                input_signal=self.__training_neural_graph.inputs["input_signal"],
                length=self.__training_neural_graph.inputs["length"],
            )
            if spec_augmentation is not None:
                i_processed_signal = spec_augmentation(
                    input_spec=i_processed_signal)
            i_encoded, i_encoded_len = encoder(audio_signal=i_processed_signal,
                                               length=i_processed_signal_len)
            i_log_probs = decoder(encoder_output=i_encoded)
            # Bind the selected outputs.
            self.__training_neural_graph.outputs["log_probs"] = i_log_probs
            self.__training_neural_graph.outputs["encoded_len"] = i_encoded_len

        # self.__evaluation_neural_graph = NeuralGraph(operation_mode=OperationMode.evaluation)
        self.__evaluation_neural_graph = NeuralGraph(
            operation_mode=OperationMode.both)
        with self.__evaluation_neural_graph:
            # Copy the input port definitions - using "user" port names.
            self.__evaluation_neural_graph.inputs[
                "input_signal"] = preprocessor.input_ports["input_signal"]
            self.__evaluation_neural_graph.inputs[
                "length"] = preprocessor.input_ports["length"]
            # Bind the selected inputs and connect the modules.
            i_processed_signal, i_processed_signal_len = preprocessor(
                input_signal=self.__evaluation_neural_graph.inputs["input_signal"],
                length=self.__evaluation_neural_graph.inputs["length"],
            )
            # Note the lack of spec augmentation for inference.
            i_encoded, i_encoded_len = encoder(audio_signal=i_processed_signal,
                                               length=i_processed_signal_len)
            i_log_probs = decoder(encoder_output=i_encoded)
            # Bind the selected outputs.
            self.__evaluation_neural_graph.outputs["log_probs"] = i_log_probs
            self.__evaluation_neural_graph.outputs[
                "encoded_len"] = i_encoded_len
Example 3
    # Instantiate Neural Factory.
    nf = NeuralModuleFactory(local_rank=args.local_rank,
                             placement=DeviceType.CPU)

    # Data layer for training.
    cifar10_dl = CIFAR10DataLayer(train=True)
    # The "model".
    cnn = ConvNetEncoder(input_depth=3, input_height=32, input_width=32)
    reshaper = ReshapeTensor(input_sizes=[-1, 16, 2, 2], output_sizes=[-1, 64])
    ffn = FeedForwardNetwork(input_size=64, output_size=10, dropout_rate=0.1)
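    # (The reshaper flattens the [-1, 16, 2, 2] feature map into [-1, 64],
    # i.e. 16*2*2 = 64, which matches the FFN input size.)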
    nl = NonLinearity(type="logsoftmax", sizes=[-1, 10])
    # Loss.
    nll_loss = NLLLoss()

    # Create a training graph.
    with NeuralGraph(operation_mode=OperationMode.training) as training_graph:
        _, img, tgt = cifar10_dl()
        feat_map = cnn(inputs=img)
        res_img = reshaper(inputs=feat_map)
        logits = ffn(inputs=res_img)
        pred = nl(inputs=logits)
        loss = nll_loss(predictions=pred, targets=tgt)
        # Set output - that output will be used for training.
        training_graph.outputs["loss"] = loss

    # Display the graph summary.
    logging.info(training_graph.summary())

    # SimpleLossLoggerCallback will print loss values to console.
    callback = SimpleLossLoggerCallback(
        tensors=[loss],
Example 4
    def test_graph_nesting2_possible_operation_modes(self):
        """ 
            Tests whether invalid nesting (i.e. nesting of graphs with incompatible modes) throw exeptions.
        """
        # Instantiate the necessary neural modules.
        dl = RealFunctionDataLayer(n=10, batch_size=1)

        with NeuralGraph(operation_mode=OperationMode.both) as both:
            _, _ = dl()

        with NeuralGraph(operation_mode=OperationMode.training) as training:
            _, _ = dl()

        with NeuralGraph(operation_mode=OperationMode.evaluation) as inference:
            _, _ = dl()

        # Allowed operations.
        # Can nest 'both' into 'training'.
        with NeuralGraph(operation_mode=OperationMode.training):
            _, _ = both()

        # Can nest 'both' into 'inference'.
        with NeuralGraph(operation_mode=OperationMode.evaluation):
            _, _ = both()

        # Can nest 'training' into 'training'.
        with NeuralGraph(operation_mode=OperationMode.training):
            _, _ = training()

        # Can nest 'inference' into 'inference'.
        with NeuralGraph(operation_mode=OperationMode.evaluation):
            _, _ = inference()

        # Can nest 'both' into 'both'.
        with NeuralGraph(operation_mode=OperationMode.both):
            _, _ = both()

        # Operations not allowed.
        # Cannot nest 'inference' into 'training'.
        with pytest.raises(TypeError):
            with NeuralGraph(operation_mode=OperationMode.training):
                _, _ = inference()

        # Cannot nest 'training' into 'inference'.
        with pytest.raises(TypeError):
            with NeuralGraph(operation_mode=OperationMode.evaluation):
                _, _ = training()

        # Cannot nest 'training' into 'both'.
        with pytest.raises(TypeError):
            with NeuralGraph(operation_mode=OperationMode.both):
                _, _ = training()

        # Cannot nest 'inference' into 'both'.
        with pytest.raises(TypeError):
            with NeuralGraph(operation_mode=OperationMode.both):
                _, _ = inference()
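
The mode-compatibility rules exercised above can be condensed into a small
standalone predicate. The sketch below is for illustration only: can_nest is a
hypothetical helper (not part of the NeMo API), and OperationMode is
re-declared just to keep the snippet self-contained.

    from enum import Enum

    class OperationMode(Enum):
        # Mirrors nemo.core.OperationMode for the purpose of this sketch.
        training = 0
        evaluation = 1
        both = 2

    def can_nest(inner: OperationMode, outer: OperationMode) -> bool:
        """ Returns True if a graph with mode `inner` may be nested inside a
            graph with mode `outer`. """
        # A 'both' graph fits anywhere; otherwise the modes must match exactly.
        return inner is OperationMode.both or inner is outer

    assert can_nest(OperationMode.both, OperationMode.training)      # allowed
    assert not can_nest(OperationMode.training, OperationMode.both)  # NeMo raises TypeError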
Example 5
    def test_graph_nesting7_topology_copy_one_module_all_manual_connect(self):
        """
            Tests whether nesting one graph into another results in a copy of the graph topology (tensors).
            Case: manual binding of inputs and outputs, with connections to other modules.
        """
        ds = RealFunctionDataLayer(n=10, batch_size=1, name="tgn7_ds")
        tn = TaylorNet(dim=4, name="tgn7_tn")
        loss = MSELoss(name="tgn7_loss")

        # Create the "inner graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn7_g1") as g1:
            # Copy the input type.
            g1.inputs["inner_x"] = tn.input_ports["x"]
            # Manually bind the input port.
            y_pred1 = tn(x=g1.inputs["inner_x"])
            # Manually bind the output port.
            g1.outputs["inner_y_pred"] = y_pred1

        # Create the "outer graph".
        with NeuralGraph(operation_mode=OperationMode.training,
                         name="tgn7_g2") as g2:
            x, y = ds()
            y_pred2 = g1(inner_x=x)
            lss = loss(predictions=y_pred2, target=y)

        # Check steps.
        assert len(g2.steps) == 3
        assert g2.steps[1] == g1.steps[0]

        # Make sure that the modules are the same.
        assert len(g2) == 3
        assert g2["tgn7_tn"] is g1["tgn7_tn"]

        # Make sure that inputs are ok.
        assert len(g2.inputs) == 0

        # Check outputs.
        assert len(g2.outputs) == 4
        assert g2.output_ports["x"].compare(
            ds.output_ports["x"]) == NeuralTypeComparisonResult.SAME
        assert g2.output_ports["y"].compare(
            ds.output_ports["y"]) == NeuralTypeComparisonResult.SAME
        assert g2.output_ports["loss"].compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME
        # The manually bound name!
        assert g2.output_ports["inner_y_pred"].compare(
            tn.output_ports["y_pred"]) == NeuralTypeComparisonResult.SAME

        # Check the output tensors.
        assert len(g2.output_tensors) == 4
        assert g2.output_tensors["x"] == x
        assert g2.output_tensors["y"] == y
        assert g2.output_tensors["loss"] == lss
        # The manually bound name!
        assert g2.output_tensors["inner_y_pred"] == y_pred2

        # Check the "internal tensors".
        assert y_pred2 is not y_pred1
        assert g2.tensors[0]["x"] == x
        assert g2.tensors[0]["y"] == y
        assert g2.tensors[2]["loss"] == lss
        # Internally the name "y_pred" is used, not the bound output name "inner_y_pred"!
        assert g2.tensors[1]["y_pred"] == y_pred2

        # Update g2: manually bind only one output.
        with g2:
            g2.outputs["outer_loss"] = lss

        # Make sure that outputs are ok.
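        # Note: manually binding even a single output replaces the previous,
        # automatically bound set of four outputs - g2 now exposes one port.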
        assert len(g2.outputs) == 1
        assert g2.output_ports["outer_loss"].compare(
            loss.output_ports["loss"]) == NeuralTypeComparisonResult.SAME
        assert g2.output_tensors["outer_loss"] is lss
Example 6
    # Parse the arguments
    args = parser.parse_args()

    # Instantiate Neural Factory.
    nf = NeuralModuleFactory(local_rank=args.local_rank)

    # Data layers for training and validation.
    dl = MNISTDataLayer(height=32, width=32, train=True)
    dl_e = MNISTDataLayer(height=32, width=32, train=False)
    # The "model".
    lenet5 = LeNet5()
    # Loss.
    nll_loss = NLLLoss()

    # Create a training graph.
    with NeuralGraph(operation_mode=OperationMode.training) as training_graph:
        _, x, y, _ = dl()
        p = lenet5(images=x)
        loss = nll_loss(predictions=p, targets=y)

    # Display the graph summary.
    logging.info(training_graph.summary())

    # Create a validation graph, starting from the second data layer.
    with NeuralGraph(
            operation_mode=OperationMode.evaluation) as evaluation_graph:
        _, x, y, _ = dl_e()
        p = lenet5(images=x)
        loss_e = nll_loss(predictions=p, targets=y)
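
    # Note: both graphs reuse the same "lenet5" and "nll_loss" module
    # instances - only the data layers differ between training and validation.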

    # Perform operations on GPU.