Example #1
0
    def __call__(self, **kwargs):
        """
        Nests this ("inner") neural graph into the currently active ("outer") graph.

        Verifies that the operation modes of the two graphs are compatible and that
        every passed keyword argument corresponds to one of this graph's input
        ports, then delegates the actual nesting to the active graph.

        Args:
            kwargs: keyword arguments containing dictionary of (input_port_name, port_content).

        Returns:
            Output tensors returned by the active graph's nesting operation.

        Raises:
            TypeError: If the inner/outer operation-mode combination is not allowed.
            NeuralPortNameMismatchError: If a keyword does not match an input port name.
        """
        # Test operation modes of the nested graphs.
        outer_mode = self._app_state.active_graph.operation_mode
        inner_mode = self.operation_mode

        # Disallowed (inner, outer) mode pairs, mapped to the labels used in the
        # error message. A single table keeps the four former `if` branches in sync.
        _incompatible = {
            (OperationMode.evaluation, OperationMode.training): ("inference", "training"),
            (OperationMode.training, OperationMode.evaluation): ("training", "inference"),
            (OperationMode.training, OperationMode.both): ("training", "both"),
            (OperationMode.evaluation, OperationMode.both): ("inference", "both"),
        }
        labels = _incompatible.get((inner_mode, outer_mode))
        if labels is not None:
            raise TypeError("Cannot nest '{}' graph into '{}'".format(*labels))

        # Check inputs: every keyword passed to "self" must name an input port.
        # (Only the names are validated here; the values are handled by nesting.)
        for port_name in kwargs:
            if port_name not in self.input_ports.keys():
                raise NeuralPortNameMismatchError(port_name)

        # "Nest" this graph into the active graph (name-mangled private call,
        # resolved against the enclosing class).
        results = self._app_state.active_graph.__nest(self, kwargs)

        # Return output tensors.
        return results
Example #2
0
    def __call__(self, **kwargs):
        """
        Records this module as a step in the active graph and wires up its ports.

        Each keyword argument must name one of the module's input ports; its value
        may be one of three things:
          * the active NeuralGraph -> a graph input port of the same name/type is
            created and bound to this module's port,
          * a GraphInput belonging to the active graph -> that existing graph
            input port is bound to this module's port,
          * an NmTensor -> this module is registered as a consumer of the tensor.

        Args:
          kwargs: Input ports and their values. For example:
          ...
          mymodule1 = Subclass1_of_NeuralModule(...)
          mymodule2 = Subclass2_of_NeuralModule(...)
          ...
          out_port1, out_port2 = mymodule1(input_port1=value1,
          input_port2=value2,
          input_port3=value3)
          out_port11 = mymodule2(input_port1=out_port2)
          ...

        Returns:
          A single NmTensor when the module defines one output port, otherwise a
          namedtuple of NmTensors (fields named after the output ports).

        Raises:
          NeuralPortNameMismatchError: A keyword does not match an input port, or
            a GraphInput does not belong to the active graph.
          ConnectionError: A NeuralGraph other than the active one was passed.
          TypeError: An input value is not a NeuralGraph, GraphInput or NmTensor.
        """
        # Set the operation mode of the outer graph.
        self.operation_mode = self._app_state.active_graph.operation_mode
        # The input and output ports definitions can potentially depend on the operation mode!

        # Record the operation (i.e. add a single module) and remember its step number.
        step_number = self._app_state.active_graph.record_step(self)

        ###### PROCESS INPUTS. ######
        # Iterate through all passed parameters.
        for port_name, port_content in kwargs.items():
            # Make sure that passed arguments corresponds to one of the input port names.
            if port_name not in self.input_ports.keys():
                raise NeuralPortNameMismatchError(port_name)

            # At that point the input can be one of three types:
            # * NeuralGraph -> bind port using the default name and type.
            # * GraphInput -> check definition, if ok bind port.
            # * NmTensor -> check definition, add self as a "consumer" of a tensor (produced by other module).

            # Check what was actually passed.
            # NOTE(review): the first two branches compare by type NAME rather than
            # identity/isinstance — presumably to avoid a circular import; confirm.
            if type(port_content).__name__ == "NeuralGraph":
                # Make sure that port_content is the currently active graph!
                if port_content is not self._app_state.active_graph:
                    raise ConnectionError(
                        "Ports can be bound only by passing the active graph object!"
                    )
                # Create an alias so the logic will be more clear.
                active_graph = port_content

                # This case: one graph is being nested into another, so this
                # module's input port must be exposed as an input of the graph
                # (i.e. the port definition is copied from module to graph).

                # Copy the port "definition" (i.e. its NeuralType) using the same port name.
                active_graph.inputs[port_name] = self.input_ports[port_name]

                # Bind the neural graph input port, i.e. remember that a given graph port should pass data
                # to THIS module-port (when it finally will be connected).
                active_graph.inputs[port_name].bind(
                    StepModulePort(step_number, self.name, port_name))

                # Please note that there are no "consumers" here - this is a "pure binding".

            elif type(port_content).__name__ == "GraphInput":

                # Check if GraphInput belongs to the active graph !
                # (Identity scan over the graph's registered inputs.)
                own_port = False
                for gcontent in self._app_state.active_graph.inputs.values():
                    if gcontent is port_content:
                        own_port = True
                        break
                if not own_port:
                    raise NeuralPortNameMismatchError(port_name)

                # Compare input port definition with the received definition.
                self.input_ports[port_name].compare_and_raise_error(
                    self.__class__.__name__, port_name, port_content.ntype)

                # Bind the neural graph input port, i.e. remember that a given graph port should pass data
                # to THIS module-port (when it finally will be connected).
                port_content.bind(
                    StepModulePort(step_number, self.name, port_name))

                # Please note that there are no "consumers" here - this is a "pure binding".

            elif type(port_content) is NmTensor:
                # Compare input port definition with the received definition.
                self.input_ports[port_name].compare_and_raise_error(
                    self.__class__.__name__, port_name, port_content)

                # Ok, the goal here is to actually "connect": add self (module) as "consumer" to the input tensor.
                port_content.add_consumer(
                    StepModulePort(step_number, self.name, port_name))
            else:
                raise TypeError(
                    "Input '{}' must be of one of three types: NeuralGraph, GraphInput or NmTensor"
                    .format(port_name))

        ###### PRODUCE OUTPUTS. ######
        output_port_defs = self.output_ports
        # Create output tensors.
        if len(output_port_defs) == 1:
            # Single output port: return a bare NmTensor instead of a tuple.
            # Get port name and type.
            out_name = list(output_port_defs)[0]
            out_type = output_port_defs[out_name]

            # Create a single returned tensor.
            results = NmTensor(
                producer=self,
                producer_args=kwargs,
                output_port_name=out_name,
                ntype=out_type,
            )

            # Bind the "default" output ports.
            self._app_state.active_graph.bind_outputs(results)
        else:
            # Create output tensors, one per output port, in definition order.
            output_tensors = []
            for out_name, out_type in output_port_defs.items():
                output_tensors.append(
                    NmTensor(
                        producer=self,
                        producer_args=kwargs,
                        output_port_name=out_name,
                        ntype=out_type,
                    ))

            # Create a named tuple type enabling to access outputs by attributes (e.g. out.x).
            output_class_name = f'{self.__class__.__name__}Output'
            result_type = namedtuple(typename=output_class_name,
                                     field_names=output_port_defs.keys())

            # Create the returned tuple object.
            results = result_type(*output_tensors)

            # Bind the output tensors.
            self._app_state.active_graph.bind_outputs(output_tensors)

        # Return the results.
        return results