Code example #1
    def matvecFWD(self, arg):
        '''Callback function for performing the matrix vector product of the
        workflow's full Jacobian with an incoming vector arg.'''

        comps = self._comp_edge_list()

        if '@fake' in comps:
            del comps['@fake']
        result = zeros(len(arg))

        # We can call applyJ on each component one-at-a-time, and poke the
        # results into the result vector.
        for compname, data in comps.iteritems():

            comp_inputs = data['inputs']
            comp_outputs = data['outputs']
            comp_residuals = data['residuals']

            inputs = {}
            outputs = {}
            out_bounds = []

            for varname in comp_inputs:
                node = '%s.%s' % (compname, varname)
                i1, i2 = self.get_bounds(node)

                if isinstance(i1, list):
                    inputs[varname] = arg[i1].copy()
                else:
                    inputs[varname] = arg[i1:i2].copy()

            for varname in comp_outputs:
                node = '%s.%s' % (compname, varname)
                i1, i2 = self.get_bounds(node)
                out_bounds.append((varname, i1, i2))

                if isinstance(i1, list):
                    if varname in comp_residuals:
                        outputs[varname] = zeros((1, 1))
                    else:
                        inputs[varname] = arg[i1].copy()
                        outputs[varname] = arg[i1].copy()
                else:
                    if varname in comp_residuals:
                        outputs[varname] = zeros((i2 - i1))
                    else:
                        inputs[varname] = arg[i1:i2].copy()
                        outputs[varname] = arg[i1:i2].copy()

            if '~' in compname:
                comp = self._derivative_graph.node[compname]['pa_object']
            else:
                comp = self.scope.get(compname)

            # Preconditioning
            # Currently not implemented in forward mode, mostly because this
            # mode requires post-multiplication of the result by M after the
            # final gradient has been computed.
            #if hasattr(comp, 'applyMinv'):
            #inputs = applyMinv(comp, inputs)

            applyJ(comp, inputs, outputs, comp_residuals,
                   self._shape_cache.get(compname),
                   self._J_cache.get(compname))
            #print inputs, outputs

            for varname, i1, i2 in out_bounds:
                if isinstance(i1, list):
                    result[i1] = outputs[varname].copy()
                else:
                    result[i1:i2] = outputs[varname].copy()

        # Each parameter adds an equation
        for src, targets in self._edges.iteritems():
            if '@in' in src or '@fake' in src:
                if not isinstance(targets, list):
                    targets = [targets]

                for target in targets:
                    i1, i2 = self.get_bounds(target)
                    #if isinstance(i1, list):
                    #    result[i1] = arg[i1]
                    #else:
                    result[i1:i2] = arg[i1:i2]

        #print arg, result
        return result
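
A callback like this is typically consumed by an iterative Krylov solver as its matrix-vector product, so the full Jacobian never has to be assembled explicitly. Below is a minimal, self-contained sketch of that usage pattern with SciPy's GMRES; the 2x2 Jacobian and the matvec_fwd stand-in are illustrative assumptions, not part of the code above.

    import numpy as np
    from scipy.sparse.linalg import LinearOperator, gmres

    # Stand-in for a matvecFWD-style callback: maps a length-n vector to the
    # product of the (implicitly defined) Jacobian with that vector.
    def matvec_fwd(v):
        J = np.array([[2.0, 0.0],
                      [1.0, 3.0]])          # illustrative Jacobian only
        return J.dot(v)

    n = 2
    A = LinearOperator((n, n), matvec=matvec_fwd, dtype=float)
    rhs = np.array([1.0, 0.0])              # seed the equation of interest

    # GMRES solves A x = rhs using only matrix-vector products.
    dx, info = gmres(A, rhs)                # info == 0 on convergence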
Code example #2
    def _matvecFWD(self, arg):
        '''Callback function for performing the matrix vector product of the
        state-to-residual Jacobian with an incoming vector arg.'''

        result = np.zeros(len(arg))

        comp_inputs = self.list_states()
        comp_outputs = self.list_residuals()
        inputs = {}
        outputs = {}

        idx = 0

        for varname in comp_inputs:
            val = getattr(self, varname)
            flatval = flattened_value(varname, val)
            size = len(flatval)

            i1, i2 = idx, idx + size
            inputs[varname] = arg[i1:i2].copy()

            idx += size

        for varname in comp_outputs:
            val = getattr(self, varname)
            flatval = flattened_value(varname, val)
            size = len(flatval)

            i1, i2 = idx, idx + size
            inputs[varname] = arg[i1:i2].copy()
            outputs[varname] = arg[i1:i2].copy()

            idx += size

        applyJ(self, inputs, outputs, [], self._shape_cache, J=self._cache_J)
        #print inputs, outputs

        idx = 0

        # Each state input adds an equation
        for varname in comp_inputs:
            val = getattr(self, varname)
            flatval = flattened_value(varname, val)
            size = len(flatval)

            i1, i2 = idx, idx + size
            result[i1:i2] = arg[i1:i2].copy()

            idx += size

        for varname in comp_outputs:
            val = getattr(self, varname)
            flatval = flattened_value(varname, val)
            size = len(flatval)

            i1, i2 = idx, idx + size
            result[i1:i2] = outputs[varname]

            idx += size

        #print arg, result
        return result
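
Because _matvecFWD only exposes the state-to-residual Jacobian through products, a convenient way to inspect or verify it is to probe the callback with unit basis vectors, recovering one column per call. A small sketch of that idea follows; the helper name is ours, and comp in the commented usage is a hypothetical component exposing _matvecFWD.

    import numpy as np

    def dense_jacobian_from_matvec(matvec, n):
        """Recover the dense n x n matrix behind a matrix-vector callback by
        multiplying it against each unit basis vector (one column per call)."""
        J = np.zeros((n, n))
        for j in range(n):
            e = np.zeros(n)
            e[j] = 1.0
            J[:, j] = matvec(e)
        return J

    # Hypothetical usage, assuming comp packs its states and residuals into a
    # combined vector of length n:
    #     J = dense_jacobian_from_matvec(comp._matvecFWD, n)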
Code example #3
    def applyJ(self, system, variables):
        """ Wrapper for component derivative specification methods.
        Forward Mode.
        """
        applyJ(system, variables)
Code example #4
    def matvecFWD(self, arg):
        '''Callback function for performing the matrix vector product of the
        workflow's full Jacobian with an incoming vector arg.'''

        comps = self._comp_edge_list()

        if '@fake' in comps:
            del comps['@fake']
        result = zeros(len(arg))

        # We can call applyJ on each component one-at-a-time, and poke the
        # results into the result vector.
        for compname, data in comps.iteritems():

            comp_inputs = data['inputs']
            comp_outputs = data['outputs']
            comp_residuals = data['residuals']

            inputs = {}
            outputs = {}
            out_bounds = []

            for varname in comp_inputs:
                node = '%s.%s' % (compname, varname)
                i1, i2 = self.get_bounds(node)

                if isinstance(i1, list):
                    inputs[varname] = arg[i1].copy()
                else:
                    inputs[varname] = arg[i1:i2].copy()

            for varname in comp_outputs:
                node = '%s.%s' % (compname, varname)
                i1, i2 = self.get_bounds(node)
                out_bounds.append((varname, i1, i2))

                if isinstance(i1, list):
                    if varname in comp_residuals:
                        outputs[varname] = zeros((1, 1))
                    else:
                        inputs[varname] = arg[i1].copy()
                        outputs[varname] = arg[i1].copy()
                else:
                    if varname in comp_residuals:
                        outputs[varname] = zeros((i2-i1))
                    else:
                        inputs[varname] = arg[i1:i2].copy()
                        outputs[varname] = arg[i1:i2].copy()

            if '~' in compname:
                comp = self._derivative_graph.node[compname]['pa_object']
            else:
                comp = self.scope.get(compname)

            # Preconditioning
            # Currently not implemented in forward mode, mostly because this
            # mode requires post-multiplication of the result by M after the
            # final gradient has been computed.
            #if hasattr(comp, 'applyMinv'):
                #inputs = applyMinv(comp, inputs)

            applyJ(comp, inputs, outputs, comp_residuals,
                   self._shape_cache.get(compname), self._J_cache.get(compname))
            #print inputs, outputs

            for varname, i1, i2 in out_bounds:
                if isinstance(i1, list):
                    result[i1] = outputs[varname].copy()
                else:
                    result[i1:i2] = outputs[varname].copy()

        # Each parameter adds an equation
        for src, targets in self._edges.iteritems():
            if '@in' in src or '@fake' in src:
                if not isinstance(targets, list):
                    targets = [targets]

                for target in targets:
                    i1, i2 = self.get_bounds(target)
                    result[i1:i2] = arg[i1:i2]

        #print arg, result
        return result
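
The isinstance(i1, list) branches in matvecFWD exist because get_bounds can return either integer slice bounds for a contiguous variable or an explicit index list for a non-contiguous one. A short numpy illustration of the two access patterns (the vector and the indices are made up for the example):

    import numpy as np

    arg = np.arange(6.0)

    # Contiguous variable: integer bounds, so an ordinary slice is taken.
    i1, i2 = 2, 5
    contiguous = arg[i1:i2].copy()          # array([2., 3., 4.])

    # Non-contiguous variable: an index list is returned instead, and numpy
    # fancy indexing pulls exactly those entries.
    i1 = [0, 3, 5]
    scattered = arg[i1].copy()              # array([0., 3., 5.])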