Exemplo n.º 1
0
    def __deserialize_connections(self, serialized_connections: Dict[str, Any],
                                  modules: Dict[str, NeuralModule]):
        """
        Private method deserializing the connections in the graph.

        Args:
            serialized_connections: Dictionary containing serialized connections.
            modules: List of modules.
        Returns:
            List of connections, in a format enabling graph traversing.
        """
        connections = []
        # Process every serialized connection record.
        for entry in serialized_connections:
            # Split "producer->consumer | ntype" into its textual parts.
            producer_str, remainder = entry.split("->")
            consumer_str, ntype_str = remainder.split(" | ")
            # Each endpoint is encoded as "step.module.port".
            p_step, p_module, p_port = producer_str.split(".")
            c_step, c_module, c_port = consumer_str.split(".")
            # Build the two step-module-port descriptors.
            producer_smp = StepModulePort(int(p_step), p_module, p_port)
            consumer_smp = StepModulePort(int(c_step), c_module, c_port)
            # Retrieve the neural type from the producer module's output port.
            ntype = modules[p_module].output_ports[p_port]
            # Validate that the serialized type matches the module definition.
            assert ntype_str == str(ntype)

            # Record the connection.
            connections.append(Connection(producer_smp, consumer_smp, ntype))
        # Return the list enabling graph traversal.
        return connections
Exemplo n.º 2
0
 def producer_step_module_port(self) -> StepModulePort:
     """
     Returns:
         A tuple containing step number, module name and corresponding output port name.
     """
     # Collect the producer coordinates stored on this object.
     step = self._step_number
     module_name = self._producer_name
     port_name = self._output_port_name
     return StepModulePort(step, module_name, port_name)
Exemplo n.º 3
0
    def deserialize(cls, serialized_inputs: List[str],
                    modules: Dict[str, 'NeuralModule']):
        """ 
            Class method responsible for deserialization of graph inputs.

            Args:
                serialized_inputs: A list of serialized inputs in the form of ("input->module.input_port")
                modules: List of modules required for neural type copying/checking.

            Returns:
                Dictionary with deserialized inputs.
        """
        graph_inputs = GraphInputs()
        # Process every serialized input record.
        for serialized in serialized_inputs:
            # Split "key->step.module.port | ntype" into its textual parts.
            key, remainder = serialized.split("->")
            consumer_str, ntype_str = remainder.split(" | ")
            step_str, module_name, port_name = consumer_str.split(".")
            # Create the input entry on first occurrence of the key.
            if key not in graph_inputs.keys():
                # Copy the neural type from the module's input port definition.
                ntype = modules[module_name].input_ports[port_name]
                # Make sure the graph bound port type matches the deserialized type.
                assert ntype_str == str(ntype)

                graph_inputs[key] = ntype
            # Record this module port as a "consumer" of the graph input.
            graph_inputs[key].bind(
                StepModulePort(int(step_str), module_name, port_name))
        # Return the populated inputs container.
        return graph_inputs
Exemplo n.º 4
0
    def deserialize(self, serialized_outputs: Dict[str, Any], modules: Dict[str, 'NeuralModule']):
        """ 
            Method responsible for deserialization of graph outputs.

            Args:
                serialized_outputs: A list of serialized outputs in the form of ("step.module.output_port->key | ntype")
                modules: List of modules required for neural type copying/checking.
        """
        # Pick the target dictionary depending on the serialized output type.
        # The "default" case still needs deserialization — e.g. a graph with
        # a nested graph that has a bound output.
        if serialized_outputs["type"] == "default":
            target = self._default_outputs
        else:
            target = self._manual_outputs

        # Process every serialized mapping.
        for mapping in serialized_outputs["mappings"]:
            # Split "step.module.port->key | ntype" into its textual parts.
            producer_str, remainder = mapping.split("->")
            key, ntype_str = remainder.split(" | ")
            step_str, module_name, port_name = producer_str.split(".")
            # Copy the neural type from the producer module's output port definition.
            ntype = modules[module_name].output_ports[port_name]

            # Make sure the graph bound port type matches the deserialized type.
            assert ntype_str == str(ntype)

            # Store the reconstructed graph output under its key.
            target[key] = GraphOutput(
                ntype, StepModulePort(int(step_str), module_name, port_name))
Exemplo n.º 5
0
    def __call__(self, **kwargs):
        """This method allows objects to be called with their port names

        Records this module as a step in the currently active graph, wires every
        passed input (binding graph ports or consuming tensors), and produces
        output NmTensor(s) bound to the active graph.

        Args:
          kwargs: Input ports and their values. For example:
          ...
          mymodule1 = Subclass1_of_NeuralModule(...)
          mymodule2 = Subclass2_of_NeuralModule(...)
          ...
          out_port1, out_port2 = mymodule1(input_port1=value1,
          input_port2=value2,
          input_port3=value3)
          out_port11 = mymodule2(input_port1=out_port2)
          ...

        Returns:
          NmTensor object or tuple of NmTensor objects

        Raises:
          NeuralPortNameMismatchError: if a kwarg does not name an input port,
            or a passed GraphInput does not belong to the active graph.
          ConnectionError: if a NeuralGraph other than the active one is passed.
          TypeError: if an input is not a NeuralGraph, GraphInput or NmTensor.
        """
        # print(" Neural Module:__call__")

        # Set the operation mode of the outer graph.
        self.operation_mode = self._app_state.active_graph.operation_mode
        # The input and output ports definitions can potentially depend on the operation mode!

        # Record the operation (i.e. add a single module).
        step_number = self._app_state.active_graph.record_step(self)

        ###### PROCESS INPUTS. ######
        # Iterate through all passed parameters.
        for port_name, port_content in kwargs.items():
            # Make sure that each passed argument corresponds to one of the input port names.
            if port_name not in self.input_ports.keys():
                raise NeuralPortNameMismatchError(port_name)

            # At that point the input can be one of three types:
            # * NeuralGraph -> bind port using the default name and type.
            # * GraphInput -> check definition, if ok bind port.
            # * NmTensor -> check definition, add self as a "consumer" of a tensor (produced by other module).

            # Check what was actually passed.
            if type(port_content).__name__ == "NeuralGraph":
                # Make sure that port_content is the currently active graph!
                if port_content is not self._app_state.active_graph:
                    raise ConnectionError(
                        "Ports can be bound only by passing the active graph object!"
                    )
                # Create an alias so the logic will be more clear.
                active_graph = port_content

                # This case: we are nesting one graph into another and must bind input port of one graph in another!
                # So generally we must "copy" the port of this module to the graph (the inverted logic!).

                # Copy the port "definition" (i.e. its NeuralType) using the same port name.
                active_graph.inputs[port_name] = self.input_ports[port_name]

                # Bind the neural graph input port, i.e. remember that a given graph port should pass data
                # to THIS module-port (when it finally will be connected).
                active_graph.inputs[port_name].bind(
                    StepModulePort(step_number, self.name, port_name))

                # Please note that there are no "consumers" here - this is a "pure binding".

            elif type(port_content).__name__ == "GraphInput":

                # Check if GraphInput belongs to the active graph !
                # (Identity scan over the active graph's bound inputs.)
                own_port = False
                for gcontent in self._app_state.active_graph.inputs.values():
                    if gcontent is port_content:
                        own_port = True
                        break
                if not own_port:
                    raise NeuralPortNameMismatchError(port_name)

                # Compare input port definition with the received definition.
                self.input_ports[port_name].compare_and_raise_error(
                    self.__class__.__name__, port_name, port_content.ntype)

                # Bind the neural graph input port, i.e. remember that a given graph port should pass data
                # to THIS module-port (when it finally will be connected).
                port_content.bind(
                    StepModulePort(step_number, self.name, port_name))

                # Please note that there are no "consumers" here - this is a "pure binding".

            elif type(port_content) is NmTensor:
                # Compare input port definition with the received definition.
                self.input_ports[port_name].compare_and_raise_error(
                    self.__class__.__name__, port_name, port_content)

                # Ok, the goal here is to actually "connect": add self (module) as "consumer" to the input tensor.
                port_content.add_consumer(
                    StepModulePort(step_number, self.name, port_name))
            else:
                raise TypeError(
                    "Input '{}' must be of one of three types: NeuralGraph, GraphInput or NmTensor"
                    .format(port_name))

        ###### PRODUCE OUTPUTS. ######
        output_port_defs = self.output_ports
        # Create output tensors.
        if len(output_port_defs) == 1:
            # Single output port: return a bare NmTensor instead of a tuple.
            # Get port name and type.
            out_name = list(output_port_defs)[0]
            out_type = output_port_defs[out_name]

            # Create a single returned tensor.
            results = NmTensor(
                producer=self,
                producer_args=kwargs,
                output_port_name=out_name,
                ntype=out_type,
            )

            # Bind the "default" output ports.
            self._app_state.active_graph.bind_outputs(results)
        else:
            # Create output tensors, one per declared output port.
            output_tensors = []
            for out_name, out_type in output_port_defs.items():
                output_tensors.append(
                    NmTensor(
                        producer=self,
                        producer_args=kwargs,
                        output_port_name=out_name,
                        ntype=out_type,
                    ))

            # Create a named tuple type enabling to access outputs by attributes (e.g. out.x).
            output_class_name = f'{self.__class__.__name__}Output'
            result_type = namedtuple(typename=output_class_name,
                                     field_names=output_port_defs.keys())

            # Create the returned tuple object.
            results = result_type(*output_tensors)

            # Bind the output tensors.
            self._app_state.active_graph.bind_outputs(output_tensors)

        # Return the results.
        return results