Example #1
File: nn.py  Project: zhufengGNSS/ddsp
  def __call__(self, *inputs, **kwargs):
    """Wrap the layer's __call__() with dictionary inputs and outputs.

    IMPORTANT: If no input_keys are provided to the constructor, they are
    inferred from the argument names in call(). If no output_keys are provided
    to the constructor, they are inferred from return annotation of call()
    (a list of strings).

    Example:
    ```
    def call(self, f0_hz, loudness) -> ['amps', 'frequencies']:
      ...
      return amps, frequencies
    ```
    Will infer `self.input_keys = ['f0_hz', 'loudness']` and
    `self.output_keys = ['amps', 'frequencies']`. If input_keys or output_keys
    are provided to the constructor, they will override these inferred values.

    Example Usage:
    The example above works with either tensor inputs `layer(f0_hz, loudness)`
    or a dictionary of tensors `layer({'f0_hz':..., 'loudness':...})`, and in
    both cases will return a dictionary of tensors
    `{'amps':..., 'frequencies':...}`.

    Args:
      *inputs: Arguments passed on to call(). If any arguments are dicts, they
        will be merged and self.input_keys will be read out of them and passed
        to call() while other args will be ignored.
      **kwargs: Keyword arguments passed on to call().

    Returns:
      outputs: A dictionary of layer outputs from call(). If the layer call()
        returns a dictionary it will be returned directly, otherwise the output
        tensors will be wrapped in a dictionary {output_key: output_tensor}.
    """
    # Merge all dictionaries provided in inputs.
    input_dict = {}
    for v in inputs:
      if isinstance(v, dict):
        input_dict.update(v)

    # If any dicts provided, lookup input tensors from those dicts.
    # Otherwise, just use inputs list as input tensors.
    if input_dict:
      inputs = [core.nested_lookup(key, input_dict) for key in self.input_keys]

    # Run input tensors through the model.
    outputs = super().__call__(*inputs, **kwargs)

    # Return dict if call() returns it.
    if isinstance(outputs, dict):
      return outputs
    # Otherwise make a dict from output_keys.
    else:
      outputs = core.make_iterable(outputs)
      if len(self.output_keys) != len(outputs):
        raise ValueError(f'Output keys ({self.output_keys}) must have the same '
                         f'length as outputs ({outputs})')
      return dict(zip(self.output_keys, outputs))
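For context, here is a minimal usage sketch of the pattern this wrapper enables, assuming the class above is exposed as the `DictLayer` base class in `ddsp.training.nn`; the toy layer, its computation, and the tensor shapes are illustrative only:
```
import tensorflow as tf
from ddsp.training import nn


class AmpsFreqs(nn.DictLayer):
  """Toy layer mapping f0 and loudness to amplitude/frequency tensors."""

  def call(self, f0_hz, loudness) -> ['amps', 'frequencies']:
    # Dummy computation; a real layer would use trainable weights.
    amps = tf.nn.relu(loudness)
    frequencies = f0_hz * 2.0
    return amps, frequencies


layer = AmpsFreqs()
f0_hz = tf.ones([1, 100, 1])
loudness = tf.zeros([1, 100, 1])

# Tensor inputs and dict inputs both return {'amps': ..., 'frequencies': ...}.
out_from_tensors = layer(f0_hz, loudness)
out_from_dict = layer({'f0_hz': f0_hz, 'loudness': loudness})
```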
Example #2
    def run_dag(self,
                inputs: TensorDict,
                verbose: bool = False,
                **kwargs) -> TensorDict:
        """Connects and runs submodules of dag.

    Args:
      inputs: A dictionary of input tensors fed to the dag.
      verbose: Print out dag routing when running.
      **kwargs: Other kwargs to pass to submodules, such as keras kwargs.

    Returns:
      A nested dictionary of all the output tensors.
    """
        # Initialize the outputs with inputs to the dag.
        outputs = {'inputs': inputs}
        # TODO(jesseengel): Remove this cluttering of the base namespace. Only there
        # for backwards compatibility.
        outputs.update(inputs)

        # Run through the DAG nodes in sequential order.
        for node in self.dag:
            # The first element of the node can be either a module or module_key.
            module_key, input_keys = node[0], node[1]
            module = getattr(self, module_key)
            # Optionally specify output keys if module does not return dict.
            output_keys = node[2] if len(node) > 2 else None

            # Get the inputs to the node.
            inputs = [core.nested_lookup(key, outputs) for key in input_keys]

            # Duck typing to avoid dealing with multiple inheritance of Group modules.
            if is_processor(module):
                # Processor modules.
                module_outputs = module(*inputs,
                                        return_outputs_dict=True,
                                        **kwargs)
            elif is_loss(module):
                # Loss modules.
                module_outputs = module.get_losses_dict(*inputs, **kwargs)
            else:
                # Network modules.
                module_outputs = module(*inputs, **kwargs)

            if not isinstance(module_outputs, dict):
                module_outputs = core.to_dict(module_outputs, output_keys)

            # Add module outputs to the dictionary.
            outputs[module_key] = module_outputs

        # Alias final module output as dag output.
        # 'out' is a reserved key for final dag output.
        outputs['out'] = module_outputs

        return outputs
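To make the node structure concrete, here is a hedged sketch of the kind of `dag` list this loop iterates over; the module names, attribute wiring, and key paths below are made up for illustration and do not correspond to a specific ddsp model:
```
# Each node: (module_key, input_keys) or (module_key, input_keys, output_keys).
# module_key names a submodule attribute on the model; input_keys are
# nested-lookup paths into the running `outputs` dictionary.
dag = [
    ('encoder', ['inputs/audio']),
    ('decoder', ['encoder/z', 'inputs/f0_hz']),
    # Optional third element: keys used to wrap a module that returns a
    # bare tensor instead of a dictionary.
    ('gain_layer', ['decoder/amps'], ['gain']),
]
```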
Example #3
    def get_controls(self, dag_inputs: TensorDict) -> TensorDict:
        """Run the DAG and get complete outputs dictionary for the processor_group.

    Args:
      dag_inputs: A dictionary of input tensors fed to the signal processing
        processor_group.

    Returns:
      A nested dictionary of all the output tensors.
    """
        # Initialize the outputs with inputs to the processor_group.
        outputs = dag_inputs

        # Run through the DAG nodes in sequential order.
        for node in self.dag:
            # Get the node processor and keys to the node input.
            processor, keys = node

            # Logging, only on the first call.
            if not self.built:
                logging.info('Connecting node (%s):', processor.name)
                for i, key in enumerate(keys):
                    logging.info('Input %d: %s', i, key)

            # Get the inputs to the node.
            inputs = [core.nested_lookup(key, outputs) for key in keys]

            # Build the processor only if called the first time in a @tf.function.
            # Need to explicitly build because we use get_controls() and get_signal()
            # separately (to get intermediates), rather than directly using call().
            if not processor.built:
                processor.build([tensor.shape for tensor in inputs])

            # Run processor.
            controls = processor.get_controls(*inputs)
            signal = processor.get_signal(**controls)

            #  Add outputs to the dictionary.
            outputs[processor.name] = {'controls': controls, 'signal': signal}

        # Get output signal from last processor.
        output_name = self.processors[-1].name
        outputs[self.name] = {'signal': outputs[output_name]['signal']}

        # Logging, only on the first call.
        if not self.built:
            logging.info('ProcessorGroup output node (%s)', output_name)

        return outputs
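A short usage sketch of this method, reading back the nested outputs it builds; it assumes a `processor_group` with a processor named 'harmonic' has already been constructed and that `dag_inputs` holds the matching control tensors (all names here are illustrative):
```
outputs = processor_group.get_controls(dag_inputs)

# Each processor stores {'controls': ..., 'signal': ...} under its own name.
harmonic_controls = outputs['harmonic']['controls']
harmonic_signal = outputs['harmonic']['signal']

# The group aliases the last processor's signal under its own name.
final_signal = outputs[processor_group.name]['signal']
```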
Example #4
File: processors.py  Project: noetits/ddsp
    def get_outputs(self, dag_inputs: TensorDict) -> TensorDict:
        """Run the DAG and get complete outputs dictionary for the processor_group.

    Args:
      dag_inputs: A dictionary of input tensors fed to the signal processing
        processor_group.

    Returns:
      outputs: A nested dictionary of all the output tensors.
    """
        # Initialize the outputs with inputs to the processor_group.
        outputs = dag_inputs

        # Run through the DAG nodes in sequential order.
        for node in self.dag:
            # Get the node processor and keys to the node input.
            processor, keys = node

            # Logging.
            logging.info('Connecting node (%s):', processor.name)
            for i, key in enumerate(keys):
                logging.info('Input %d: %s', i, key)

            # Get the inputs to the node.
            inputs = [core.nested_lookup(key, outputs) for key in keys]

            # Build the processor (does nothing if not the first time).
            processor.build([tensor.shape for tensor in inputs])
            # Run processor.
            controls = processor.get_controls(*inputs)
            signal = processor.get_signal(**controls)

            #  Add outputs to the dictionary.
            outputs[processor.name] = {'controls': controls, 'signal': signal}

        # Get output signal from last processor.
        output_name = self.processors[-1].name
        outputs[self.name] = {'signal': outputs[output_name]['signal']}
        logging.info('ProcessorGroup output node (%s)', output_name)

        return outputs
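The routing in these loops relies on `core.nested_lookup` to resolve possibly nested keys against the growing `outputs` dictionary. As a rough illustration of the idea only (ddsp's actual implementation may differ in delimiter, dict handling, and error reporting):
```
def nested_lookup_sketch(key, nested_dict, delimiter='/'):
  """Illustrative only: resolve 'a/b' to nested_dict['a']['b']."""
  value = nested_dict
  for part in key.split(delimiter):
    value = value[part]
  return value


outputs = {'harmonic': {'controls': {'amps': 1.0}, 'signal': 2.0}}
assert nested_lookup_sketch('harmonic/signal', outputs) == 2.0
assert nested_lookup_sketch('harmonic/controls/amps', outputs) == 1.0
```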
Example #5
    def __call__(self, *inputs, **kwargs):
        """Wrap the layer's __call__() with dictionary inputs and outputs.

    IMPORTANT: If no input_keys are provided to the constructor, they are
    inferred from the argument names in call(). If no output_keys are provided
    to the constructor, they are inferred from return annotation of call()
    (a list of strings).

    Example:
    ========
    ```
    def call(self, f0_hz, loudness, power=None) -> ['amps', 'frequencies']:
      ...
      return amps, frequencies
    ```
    Will infer `self.input_keys = ['f0_hz', 'loudness']` and
    `self.output_keys = ['amps', 'frequencies']`. If input_keys or output_keys
    are provided to the constructor, they will override these inferred values.
    It will also infer `self.default_input_keys = ['power']`, which it will try
    to look up in the inputs, but will fall back to the default value (without
    raising an error) if the key is not in the input dictionary.

    Example Usage:
    ==============
    The example above works with tensor inputs `layer(f0_hz, loudness)` or
    `layer(f0_hz, loudness, power)`, or with a dictionary of tensors
    `layer({'f0_hz':..., 'loudness':...})` or
    `layer({'f0_hz':..., 'loudness':..., 'power':...})`, and in all cases will
    return a dictionary of tensors `{'amps':..., 'frequencies':...}`.

    Args:
      *inputs: Arguments passed on to call(). If any arguments are dicts, they
        will be merged and self.input_keys will be read out of them and passed
        to call() while other args will be ignored.
      **kwargs: Keyword arguments passed on to call().

    Returns:
      outputs: A dictionary of layer outputs from call(). If the layer call()
        returns a dictionary it will be returned directly, otherwise the output
        tensors will be wrapped in a dictionary {output_key: output_tensor}.
    """
        # Construct a list of input tensors equal in length and order to the `call`
        # input signature.
        # -- Start first with any tensor arguments.
        # -- Then lookup tensors from input dictionaries.
        # -- Use default values if not found.

        # Start by merging all dictionaries of tensors from the input.
        input_dict = {}
        for v in inputs:
            if isinstance(v, dict):
                input_dict.update(v)

        # And then strip all dictionaries from the input.
        inputs = [v for v in inputs if not isinstance(v, dict)]

        # Add any tensors from kwargs.
        for key in self.all_input_keys:
            if key in kwargs:
                input_dict[key] = kwargs[key]

        # And strip from kwargs.
        kwargs = {
            k: v
            for k, v in kwargs.items() if k not in self.all_input_keys
        }

        # Look up further inputs from the dictionaries.
        for key in self.input_keys:
            try:
                # If key is present use the input_dict value.
                inputs.append(core.nested_lookup(key, input_dict))
            except KeyError:
                # Skip if not present.
                pass

        # Add default arguments.
        for key, value in zip(self.default_input_keys,
                              self.default_input_values):
            try:
                # If key is present, use the input_dict value.
                inputs.append(core.nested_lookup(key, input_dict))
            except KeyError:
                # Otherwise use the default value if not supplied as non-dict input.
                if len(inputs) < self.n_inputs:
                    inputs.append(value)

        # Run input tensors through the model.
        if len(inputs) != self.n_inputs:
            raise TypeError(
                f'{len(inputs)} input tensors extracted from inputs '
                '(including default args) but the layer expects '
                f'{self.n_inputs} tensors.\n'
                f'Input keys: {self.input_keys}\n'
                f'Default keys: {self.default_input_keys}\n'
                f'Default values: {self.default_input_values}\n'
                f'Input dictionaries: {input_dict}\n'
                f'Input Tensors (Args, Dicts, and Defaults): {inputs}\n')
        outputs = super().__call__(*inputs, **kwargs)

        # Return dict if call() returns it.
        if isinstance(outputs, dict):
            return outputs
        # Otherwise make a dict from output_keys.
        else:
            outputs = core.make_iterable(outputs)
            if len(self.output_keys) != len(outputs):
                raise ValueError(
                    f'Output keys ({self.output_keys}) must have the same '
                    f'length as outputs ({outputs})')
            return dict(zip(self.output_keys, outputs))
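A hedged sketch of the default-argument behavior described in the docstring, again assuming the wrapper above is the `DictLayer` base class in `ddsp.training.nn`; the toy layer and tensors are illustrative only:
```
import tensorflow as tf
from ddsp.training import nn


class AmpsFreqs(nn.DictLayer):
  """Toy layer with an optional 'power' input."""

  def call(self, f0_hz, loudness, power=None) -> ['amps', 'frequencies']:
    # Dummy computation; a real layer would use trainable weights.
    amps = loudness if power is None else loudness * power
    return amps, f0_hz * 2.0


layer = AmpsFreqs()
f0_hz = tf.ones([1, 100, 1])
loudness = tf.zeros([1, 100, 1])
power = tf.ones([1, 100, 1])

# 'power' missing from the dict -> the default value (None) is used.
out_default = layer({'f0_hz': f0_hz, 'loudness': loudness})
# 'power' present -> it is looked up and passed to call().
out_with_power = layer({'f0_hz': f0_hz, 'loudness': loudness, 'power': power})
```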
Example #6
    def _check_tensor_outputs(self, strings_to_check, outputs):
        for tensor_string in strings_to_check:
            tensor = core.nested_lookup(tensor_string, outputs)
            self.assertIsInstance(tensor, (np.ndarray, tf.Tensor))
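A brief sketch of how a test might use this helper, assuming `outputs` is the nested dictionary returned by a model or dag run and that the listed keys exist in it (the key names are illustrative):
```
# Inside a tf.test.TestCase subclass that defines _check_tensor_outputs():
outputs = model(features)
self._check_tensor_outputs(['amps', 'harmonic/signal', 'out/signal'], outputs)
```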
Example #7
File: dags.py  Project: edwar64896/ddsp
    def run_dag(self,
                inputs: TensorDict,
                verbose: bool = True,
                **kwargs) -> TensorDict:
        """Connects and runs submodules of dag.

    Args:
      inputs: A dictionary of input tensors fed to the dag.
      verbose: Print out dag routing when running.
      **kwargs: Other kwargs to pass to submodules, such as keras kwargs.

    Returns:
      A nested dictionary of all the output tensors.
    """
        # Initialize the outputs with inputs to the dag.
        outputs = {'inputs': inputs}
        # TODO(jesseengel): Remove this cluttering of the base namespace. Only there
        # for backwards compatibility.
        outputs.update(inputs)

        # Run through the DAG nodes in sequential order.
        for node in self.dag:
            # The first element of the node can be either a module or module_key.
            module_key, input_keys = node[0], node[1]
            module = getattr(self, module_key)
            # Optionally specify output keys if module does not return dict.
            output_keys = node[2] if len(node) > 2 else None

            # Get the inputs to the node.
            inputs = [core.nested_lookup(key, outputs) for key in input_keys]

            if verbose:
                shape = lambda d: tf.nest.map_structure(
                    lambda x: list(x.shape), d)
                logging.info('Input to Module: %s\nKeys: %s\nIn: %s\n',
                             module_key, input_keys, shape(inputs))

            if is_processor(module):
                # Processor modules.
                module_outputs = module(*inputs,
                                        return_outputs_dict=True,
                                        **kwargs)
            else:
                # Network modules.
                module_outputs = module(*inputs, **kwargs)

            if not isinstance(module_outputs, dict):
                module_outputs = core.to_dict(module_outputs, output_keys)

            if verbose:
                logging.info('Output from Module: %s\nOut: %s\n', module_key,
                             shape(module_outputs))

            # Add module outputs to the dictionary.
            outputs[module_key] = module_outputs

        # Alias final module output as dag output.
        # 'out' is a reserved key for final dag output.
        outputs['out'] = module_outputs

        return outputs
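When a module returns a bare tensor or tuple rather than a dictionary, the optional third node element supplies the output keys used to wrap it via `core.to_dict`. Roughly, and only as an illustration (not ddsp's exact implementation):
```
def to_dict_sketch(values, keys):
  """Illustrative only: wrap non-dict module outputs under the given keys."""
  values = values if isinstance(values, (list, tuple)) else [values]
  return dict(zip(keys, values))


# A node like ('gain_layer', ['decoder/amps'], ['gain']) whose module returns
# a single tensor would be stored as outputs['gain_layer'] = {'gain': tensor}.
```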