# Example #1
# 0
    def compute_partials(self, inputs, partials):
        """
        Collect computed partial derivatives and return them.

        Checks if the needed derivatives are cached already based on the
        inputs vector. Refreshes the cache by re-computing the current point
        if necessary.

        Parameters
        ----------
        inputs : Vector
            unscaled, dimensional input variables read via inputs[key]
        partials : Jacobian
            sub-jac components written to partials[output_name, input_name]
        """
        # Stack the flattened inputs into an (num_nodes, num_params) array of
        # evaluation points, one row per node.
        pt = np.array([inputs[pname].flatten() for pname in self.pnames]).T
        if self.metadata['training_data_gradients']:
            dy_ddata = np.zeros(self.sh)

            # Each basis spline interpolates the identity matrix over one
            # training-grid axis.  The splines depend only on the grid and
            # spline order, not on the evaluation point, so build them once
            # instead of once per node.
            basis_splines = [make_interp_spline(axis,
                                                np.eye(axis.size),
                                                k=self._ki[i],
                                                axis=0)
                             for i, axis in enumerate(self.params)]

            for j in range(self.metadata['num_nodes']):
                val = None
                for i, interp in enumerate(basis_splines):
                    if val is None:
                        val = interp(pt[j, i])
                    else:
                        # Tensor-product of the per-axis basis weights.
                        val = np.outer(val, interp(pt[j, i]))
                dy_ddata[j] = val.reshape(self.sh[1:])

        for out_name in self.interps:
            dval = self.interps[out_name].gradient(pt).T
            for i, p in enumerate(self.pnames):
                # Diagonal sub-jacobian: each node's output depends only on
                # that node's own input value.
                partials[out_name, p] = np.diag(dval[i])

            if self.metadata['training_data_gradients']:
                partials[out_name, "%s_train" % out_name] = dy_ddata
    def compute_partials(self, inputs, partials):
        """
        Collect computed partial derivatives and return them.

        Checks if the needed derivatives are cached already based on the
        inputs vector. Refreshes the cache by re-computing the current point
        if necessary.

        Parameters
        ----------
        inputs : Vector
            unscaled, dimensional input variables read via inputs[key]
        partials : Jacobian
            sub-jac components written to partials[output_name, input_name]
        """
        # Evaluation points: one row per vectorized point, one column per param.
        pt = np.array([inputs[pname].flatten() for pname in self.pnames]).T
        if self.options['training_data_gradients']:
            dy_ddata = np.zeros(self.sh)

            # Each basis spline interpolates the identity matrix over one
            # training-grid axis.  These depend only on the grid and spline
            # order — not on the evaluation point — so construct them once
            # rather than once per vectorized point.
            splines = [make_interp_spline(axis,
                                          np.eye(axis.size),
                                          k=self._ki[i],
                                          axis=0)
                       for i, axis in enumerate(self.params)]

            for j in range(self.options['vec_size']):
                val = None
                for i, interp in enumerate(splines):
                    if val is None:
                        val = interp(pt[j, i])
                    else:
                        # Tensor-product of the per-axis basis weights.
                        val = np.outer(val, interp(pt[j, i]))
                dy_ddata[j] = val.reshape(self.sh[1:])

        for out_name in self.interps:
            dval = self.interps[out_name].gradient(pt).T
            for i, p in enumerate(self.pnames):
                # Sparse (row/col declared) sub-jac: store only the diagonal.
                partials[out_name, p] = dval[i]

            if self.options['training_data_gradients']:
                partials[out_name, "%s_train" % out_name] = dy_ddata
# Example #3
# 0
    def compute_partials(self, inputs, partials):
        """
        Collect computed partial derivatives and return them.

        Checks if the needed derivatives are cached already based on the
        inputs vector. Refreshes the cache by re-computing the current point
        if necessary.

        Parameters
        ----------
        inputs : Vector
            unscaled, dimensional input variables read via inputs[key]
        partials : Jacobian
            sub-jac components written to partials[output_name, input_name]
        """
        # Stack the flattened inputs into an (num_nodes, num_params) array of
        # evaluation points.
        pt = np.array([inputs[pname].flatten() for pname in self.pnames]).T
        if self.metadata['training_data_gradients']:
            dy_ddata = np.zeros(self.sh)

            # The per-axis basis splines (identity matrix interpolated over a
            # training-grid axis) are independent of the evaluation point, so
            # hoist their construction out of the per-node loop.
            basis_splines = [make_interp_spline(axis,
                                                np.eye(axis.size),
                                                k=self._ki[i],
                                                axis=0)
                             for i, axis in enumerate(self.params)]

            for j in range(self.metadata['num_nodes']):
                val = None
                for i, interp in enumerate(basis_splines):
                    if val is None:
                        val = interp(pt[j, i])
                    else:
                        # Tensor-product of the per-axis basis weights.
                        val = np.outer(val, interp(pt[j, i]))
                dy_ddata[j] = val.reshape(self.sh[1:])

        for out_name in self.interps:
            dval = self.interps[out_name].gradient(pt).T
            for i, p in enumerate(self.pnames):
                # Sparse (row/col declared) sub-jac: store only the diagonal.
                partials[out_name, p] = dval[i]

            if self.metadata['training_data_gradients']:
                partials[out_name, "%s_train" % out_name] = dy_ddata