Example #1
def test_function_names():
    def my_func(x):
        return x

    class MyFunc:
        def __call__(self, x):
            return x

    assert utils.function_name(my_func) == "my_func"
    assert utils.function_name(MyFunc()) == "MyFunc"
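The assertions above pin down the contract: plain functions report their __name__, while callable objects fall back to their class name. A minimal sketch of a helper with that behavior, assuming only what this test shows (the real nengo-dl utils.function_name also takes a sanitize flag, as seen in Example #4):

def function_name(func):
    # Illustrative sketch, not the actual nengo-dl implementation:
    # plain functions (and lambdas) carry their own __name__, while
    # callable objects fall back to the name of their class.
    return getattr(func, "__name__", type(func).__name__)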
Example #2
    def build_pre(self, signals, config):
        super().build_pre(signals, config)

        logger.debug("t %s", [op.t for op in self.ops])
        logger.debug("x %s", [op.x for op in self.ops])
        logger.debug("fn %s", [op.fn for op in self.ops])

        # scalar simulation time (None if these ops don't read `t`), plus the
        # merged input signals for all of the ops being built together
        self.time_data = (None if self.ops[0].t is None else
                          signals[self.ops[0].t].reshape(()))
        self.input_data = signals.combine([op.x for op in self.ops])

        if self.ops[0].output is not None:
            self.output_data = signals.combine([op.output for op in self.ops])
            self.output_dtype = self.output_data.dtype
        else:
            # no output signal to write to; fall back to the simulation dtype
            self.output_data = None
            self.output_dtype = signals.dtype

        def merged_func(time, inputs):  # pragma: no cover (runs in TF)
            outputs = []
            offset = 0
            for op in self.ops:
                if op.output is None:
                    func = op.fn
                else:
                    func = utils.align_func(self.output_dtype)(op.fn)

                # slice out this op's block of the merged input
                func_input = inputs[:, offset:offset + op.x.shape[0]]
                offset += op.x.shape[0]

                mini_out = []
                for j in range(signals.minibatch_size):
                    if op.t is None:
                        func_out = func(func_input[j])
                    else:
                        func_out = func(time, func_input[j])

                    func_out = np.atleast_1d(func_out)

                    if op.output is None:
                        # just return time as a noop (since we need to
                        # return something)
                        func_out = [time]
                    mini_out += [func_out]
                outputs += [np.stack(mini_out, axis=0)]

            return np.concatenate(outputs, axis=1)

        self.merged_func = merged_func
        self.merged_func.__name__ = "_".join(
            [utils.function_name(op.fn) for op in self.ops])
        self.output_shape = (signals.minibatch_size, )
        self.output_shape += ((len(self.ops), ) if self.output_data is None
                              else self.output_data.shape)
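merged_func above leans on utils.align_func to coerce each op's return value to the expected dtype. A rough sketch of what such a wrapper could do, assuming the single-argument form used in this example (the variant in Example #3 also takes an output shape); this is an assumption, not the actual nengo-dl code:

import numpy as np

def align_func(output_dtype):
    # Assumed sketch: build a decorator that casts a function's
    # return value to an array of the given dtype.
    def decorator(func):
        def wrapper(*args):
            return np.asarray(func(*args), dtype=output_dtype)
        return wrapper
    return decorator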
Example #3
    def __init__(self, ops, signals, config):
        super(SimPyFuncBuilder, self).__init__(ops, signals, config)

        logger.debug("t %s", [op.t for op in ops])
        logger.debug("x %s", [op.x for op in ops])
        logger.debug("fn %s", [op.fn for op in ops])

        self.time_input = ops[0].t is not None
        self.input_data = signals.combine([op.x for op in ops])

        if ops[0].output is not None:
            self.output_data = signals.combine([op.output for op in ops])
            self.output_dtype = self.output_data.dtype
        else:
            self.output_data = None
            self.output_dtype = signals.dtype

        def merged_func(time, inputs):  # pragma: no cover
            outputs = []
            offset = 0
            for op in ops:
                if op.output is None:
                    func = op.fn
                else:
                    func = utils.align_func(op.output.shape,
                                            self.output_dtype)(op.fn)

                # this op's block of the merged input (minibatch axis last)
                func_input = inputs[offset:offset + op.x.shape[0]]
                offset += op.x.shape[0]

                mini_out = []
                for j in range(signals.minibatch_size):
                    if op.t is None:
                        func_out = func(func_input[..., j])
                    else:
                        func_out = func(time, func_input[..., j])

                    if op.output is None:
                        # just return time as a noop (since we need to
                        # return something)
                        func_out = time
                    mini_out += [func_out]
                outputs += [np.stack(mini_out, axis=-1)]

            return np.concatenate(outputs, axis=0)

        self.merged_func = merged_func
        self.merged_func.__name__ = "_".join(
            [utils.function_name(op.fn) for op in ops])
        self.output_shape = ((len(ops), ) if self.output_data is None else
                             self.output_data.shape)
        self.output_shape += (signals.minibatch_size, )
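Note how this version differs from the build_pre variant in Example #2: there the minibatch axis comes first (inputs[:, ...], np.stack(..., axis=0), np.concatenate(..., axis=1)), while here it comes last. A quick numpy check of the two stacking conventions:

import numpy as np

minibatch_size, op_output_size = 2, 3
outs = [np.zeros(op_output_size) for _ in range(minibatch_size)]

print(np.stack(outs, axis=0).shape)   # (2, 3): minibatch axis first
print(np.stack(outs, axis=-1).shape)  # (3, 2): minibatch axis last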
Example #4
    def build_outputs(self, outputs):
        """
        Adds elements into the graph to compute the given outputs.

        Parameters
        ----------
        outputs : dict of {(tuple of) `~nengo.Probe`: callable or None}
            The output function to be applied to each probe or group of probes.
            The function can accept one argument (the output of that probe) or
            two (output and target values for that probe).  If a tuple of
            Probes is given as the key, then those output/target parameters
            will be the corresponding tuples of probe/target values.  The
            function should return a ``tf.Tensor`` or tuple of Tensors
            representing the output we want from those probes.  If ``None`` is
            given instead of a function then the output will simply be the
            output value from the corresponding probes.

        Returns
        -------
        output_vals : dict of {(tuple of) `~nengo.Probe`: \
                               (tuple of) ``tf.Tensor``}
            Tensors representing the result of applying the output functions
            to the probes.
        new_vars_init : ``tf.Tensor`` or None
            Initialization op for any new variables created when building
            the outputs.

        Notes
        -----
        This function caches its outputs, so if it is called again with the
        same arguments it will return the previously built Tensors rather
        than constructing duplicates of the same operations.  Caching can
        also matter functionally, e.g. if the outputs have internal state,
        since it ensures that subsequent calls share that state.
        """

        key = frozenset(outputs.items())

        try:
            # return the cached outputs if they exist
            return self.outputs[key], None
        except KeyError:
            pass

        output_vals = {}
        new_vars = []
        for probes, out in outputs.items():
            is_tuple = isinstance(probes, tuple)
            probe_arrays = (tuple(
                self.probe_arrays[p]
                for p in probes) if is_tuple else self.probe_arrays[probes])

            if out is None:
                # return probe output value
                output_vals[probes] = probe_arrays
            elif callable(out):
                # look up number of positional arguments for function
                spec = inspect.getfullargspec(out)

                nargs = len(spec.args)
                if spec.defaults is not None:
                    # don't count arguments that have default values
                    nargs -= len(spec.defaults)
                if inspect.ismethod(out) or not inspect.isroutine(out):
                    # don't count self argument for methods or callable classes
                    nargs -= 1

                # build function arguments
                if nargs == 1:
                    args = [probe_arrays]
                elif nargs == 2:
                    for p in probes if is_tuple else (probes, ):
                        # create a placeholder for the target values if one
                        # hasn't been created yet
                        if p not in self.target_phs:
                            self.target_phs[p] = tf.placeholder(
                                self.dtype,
                                (self.minibatch_size, None, p.size_in),
                                name="%s_ph" % utils.sanitize_name(p))
                    target_phs = (tuple(self.target_phs[p] for p in probes)
                                  if is_tuple else self.target_phs[probes])
                    args = [probe_arrays, target_phs]
                else:
                    raise ValidationError(
                        "Output functions must accept 1 or 2 arguments; '%s' "
                        "takes %s arguments" %
                        (utils.function_name(out, sanitize=False), nargs),
                        "outputs")

                # apply output function
                with tf.variable_scope(utils.function_name(out)) as scope:
                    output_vals[probes] = out(*args)

                # collect any new variables from building the outputs
                for collection in [
                        tf.GraphKeys.GLOBAL_VARIABLES,
                        tf.GraphKeys.LOCAL_VARIABLES, "gradient_vars"
                ]:
                    new_vars.extend(scope.get_collection(collection))
            else:
                raise ValidationError("Outputs must be callable or None)",
                                      "outputs")

        new_vars_init = (tf.variables_initializer(new_vars)
                         if len(new_vars) > 0 else None)

        self.outputs[key] = output_vals

        return output_vals, new_vars_init
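A hypothetical call, to make the one- vs. two-argument distinction concrete (tensor_graph, p_out, and p_raw are illustrative names, not part of this module):

def mse(outputs, targets):  # two arguments, so a target placeholder is built
    return tf.reduce_mean(tf.square(outputs - targets))

output_vals, init_op = tensor_graph.build_outputs({
    p_out: mse,   # loss comparing p_out's output against its targets
    p_raw: None,  # no function; just return the raw probe value
})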