    def __init__(self, system, inputs, outputs):
        """ Performs finite difference on the components in a given
        System. """

        self.inputs = inputs
        self.outputs = outputs
        self.in_bounds = {}

        self.system = system
        self.scope = system.scope

        in_size = 0
        for srcs in self.inputs:

            # Support for parameter groups
            if isinstance(srcs, basestring):
                srcs = [srcs]

            val = self.scope.get(srcs[0])
            width = flattened_size(srcs[0], val, self.scope)

            for src in srcs:
                self.in_bounds[src] = (in_size, in_size+width)
            in_size += width

        out_size = 0
        for src in self.outputs:
            val = self.scope.get(src)
            width = flattened_size(src, val)
            out_size += width

        self.y_base = zeros((out_size,))
        self.y = zeros((out_size,))
        self.y2 = zeros((out_size,))
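As a point of reference, here is a minimal, self-contained sketch of the in_bounds bookkeeping used above. DummyScope and flattened_size_stub are hypothetical stand-ins (the real flattened_size handles many more value types):

from numpy import asarray, zeros

def flattened_size_stub(name, val, scope=None):
    # Stand-in: the number of floats in val once flattened.
    return asarray(val, dtype=float).size

class DummyScope(object):
    """Hypothetical scope that just looks names up in a dict."""
    def __init__(self, data):
        self._data = data
    def get(self, name):
        return self._data[name]

scope = DummyScope({'comp.x': 3.0, 'comp.y': [1.0, 2.0, 3.0]})

in_bounds = {}
in_size = 0
for srcs in ['comp.x', ('comp.y',)]:    # a plain input and a parameter group
    if isinstance(srcs, str):
        srcs = [srcs]
    width = flattened_size_stub(srcs[0], scope.get(srcs[0]))
    for src in srcs:                    # members of a group share one slice
        in_bounds[src] = (in_size, in_size + width)
    in_size += width

print(in_bounds)                # {'comp.x': (0, 1), 'comp.y': (1, 4)}
print(zeros((in_size,)).shape)  # the flat work vectors are sized to match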
Example No. 2
    def provideJ(self):
        """Calculate analytical first derivatives."""

        if self.Jsize is None:
            n_in = 0
            n_out = 0
            for varname in self.list_inputs():
                val = self.get(varname)
                width = flattened_size(varname, val, self)
                n_in += width
            for varname in self.list_outputs():
                val = self.get(varname)
                width = flattened_size(varname, val, self)
                n_out += width
            self.Jsize = (n_out, n_in)

        J = zeros(self.Jsize)
        grad = self._srcexpr.evaluate_gradient()

        i = 0
        for varname in self._inputs:
            val = self.get(varname)
            width = flattened_size(varname, val, self)
            J[:, i:i+width] = grad[varname]
            i += width

        return J
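For intuition, a short sketch of the column-block assembly in the loop above; the grad dict mimics what evaluate_gradient() might return for a single scalar output, and the variable names are invented:

from numpy import array, zeros

grad = {'a': array([2.0]), 'b': array([0.5, -1.0])}   # hypothetical gradients
widths = {'a': 1, 'b': 2}

J = zeros((1, 3))                       # n_out = 1, n_in = 1 + 2
i = 0
for varname in ['a', 'b']:
    width = widths[varname]
    J[:, i:i + width] = grad[varname]   # broadcasts into the column block
    i += width

print(J)                                # [[ 2.   0.5 -1. ]]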
Example No. 4
    def __init__(self, pa):
        """ Performs finite difference on the components in a given
        pseudo_assembly. """

        self.inputs = pa.inputs
        self.outputs = pa.outputs
        self.in_bounds = {}
        self.out_bounds = {}
        self.pa = pa
        self.scope = pa.wflow.scope

        in_size = 0
        for srcs in self.inputs:

            # Support for parameter groups
            if isinstance(srcs, basestring):
                srcs = [srcs]

            val = self.scope.get(srcs[0])
            width = flattened_size(srcs[0], val, self.scope)

            for src in srcs:
                self.in_bounds[src] = (in_size, in_size+width)
            in_size += width

        out_size = 0
        for src in self.outputs:
            val = self.scope.get(src)
            width = flattened_size(src, val)
            self.out_bounds[src] = (out_size, out_size+width)
            out_size += width

        self.y_base = zeros((out_size,))
        self.y = zeros((out_size,))
        self.y2 = zeros((out_size,))
Example No. 5
def get_bounds(obj, input_keys, output_keys, J):
    """ Returns a pair of dictionaries that contain the stop and end index
    for each input and output in a pair of lists.
    """

    ibounds = {}
    nvar = 0
    scope = getattr(obj, 'parent', None)

    for key in input_keys:

        # For parameter group, all should be equal so just get first.
        if not isinstance(key, tuple):
            key = [key]

        val = obj.get(key[0])

        width = flattened_size('.'.join((obj.name, key[0])), val,
                               scope=scope)
        shape = getattr(val, 'shape', None)
        for item in key:
            ibounds[item] = (nvar, nvar+width, shape)
        nvar += width

    num_input = nvar

    obounds = {}
    nvar = 0
    for key in output_keys:
        val = obj.get(key)
        width = flattened_size('.'.join((obj.name, key)), val)
        shape = getattr(val, 'shape', None)
        obounds[key] = (nvar, nvar+width, shape)
        nvar += width

    num_output = nvar

    if num_input and num_output:
        # Give the user an intelligible error if the size of J is wrong.
        try:
            J_output, J_input = J.shape
        except ValueError as err:
            exc_type, value, traceback = sys.exc_info()
            msg = "Jacobian has the wrong dimensions. Expected 2D but got {}D."
            msg = msg.format(J.ndim)

            raise ValueError, ValueError(msg), traceback

        if num_output != J_output or num_input != J_input:
            msg = 'Jacobian is the wrong size. Expected ' + \
                '(%dx%d) but got (%dx%d)' % (num_output, num_input,
                                             J_output, J_input)
            if ISystem.providedBy(obj):
                raise RuntimeError(msg)
            else:
                obj.raise_exception(msg, RuntimeError)

    return ibounds, obounds
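To show how the returned dictionaries are meant to be consumed: each entry is a (start, stop, shape) triple that addresses a block of J. A hedged sketch with invented names and sizes:

from numpy import zeros

# Suppose get_bounds(comp, ['x'], ['f', 'g'], J) returned these:
ibounds = {'x': (0, 3, (3,))}           # x flattens to 3 floats
obounds = {'f': (0, 1, None),           # f is a scalar
           'g': (1, 3, (2,))}           # g flattens to 2 floats

J = zeros((3, 3))
i1, i2, _ = ibounds['x']
k1, k2, _ = obounds['g']
J[k1:k2, i1:i2] = 1.0                   # address the dg/dx block directly
print(J)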
Example No. 7
def get_bounds(obj, input_keys, output_keys, J):
    """ Returns a pair of dictionaries that contain the stop and end index
    for each input and output in a pair of lists.
    """

    ibounds = {}
    nvar = 0
    if hasattr(obj, 'parent'):
        scope = obj.parent
    else:
        scope = None  # Pseudoassys

    for key in input_keys:

        # For parameter group, all should be equal so just get first.
        if not isinstance(key, tuple):
            key = [key]

        val = obj.get(key[0])

        width = flattened_size('.'.join((obj.name, key[0])), val, scope=scope)
        shape = val.shape if hasattr(val, 'shape') else None
        for item in key:
            ibounds[item] = (nvar, nvar + width, shape)
        nvar += width

    num_input = nvar

    obounds = {}
    nvar = 0
    for key in output_keys:
        val = obj.get(key)
        width = flattened_size('.'.join((obj.name, key)), val)
        shape = val.shape if hasattr(val, 'shape') else None
        obounds[key] = (nvar, nvar + width, shape)
        nvar += width

    num_output = nvar

    # Give the user an intelligible error if the size of J is wrong.
    J_output, J_input = J.shape
    if num_output != J_output or num_input != J_input:
        msg = 'Jacobian is the wrong size. Expected ' + \
            '(%dx%d) but got (%dx%d)' % (num_output, num_input,
                                         J_output, J_input)
        obj.raise_exception(msg, RuntimeError)

    return ibounds, obounds
Example No. 8
def get_bounds(obj, input_keys, output_keys, J):
    """ Returns a pair of dictionaries that contain the stop and end index
    for each input and output in a pair of lists.
    """

    ibounds = {}
    nvar = 0
    scope = getattr(obj, 'parent', None)

    for key in input_keys:

        # For parameter group, all should be equal so just get first.
        if not isinstance(key, tuple):
            key = [key]

        val = obj.get(key[0])

        width = flattened_size('.'.join((obj.name, key[0])), val,
                               scope=scope)
        shape = getattr(val, 'shape', None)
        for item in key:
            ibounds[item] = (nvar, nvar+width, shape)
        nvar += width

    num_input = nvar

    obounds = {}
    nvar = 0
    for key in output_keys:
        val = obj.get(key)
        width = flattened_size('.'.join((obj.name, key)), val)
        shape = getattr(val, 'shape', None)
        obounds[key] = (nvar, nvar+width, shape)
        nvar += width

    num_output = nvar

    if num_input and num_output:
        # Give the user an intelligible error if the size of J is wrong.
        J_output, J_input = J.shape
        if num_output != J_output or num_input != J_input:
            msg = 'Jacobian is the wrong size. Expected ' + \
                '(%dx%d) but got (%dx%d)' % (num_output, num_input,
                                             J_output, J_input)
            obj.raise_exception(msg, RuntimeError)

    return ibounds, obounds
Example No. 9
    def get_width(self, attr):
        """Return the flattened width of the value of the given attribute."""
        width = self._width_cache.get(attr, _missing)
        if width is _missing:
            param = from_PA_var(attr)
            self._width_cache[attr] = width = flattened_size(
                param, self.scope.get(param), self.scope)
        return width
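The _missing default distinguishes "never computed" from a cached value that happens to be falsy (such as a width of 0). A standalone toy version of the pattern, with len(attr) standing in for the real from_PA_var/flattened_size computation:

_missing = object()     # unique sentinel: never equal to any real value

class WidthCache(object):
    def __init__(self):
        self._width_cache = {}

    def get_width(self, attr):
        width = self._width_cache.get(attr, _missing)
        if width is _missing:
            self._width_cache[attr] = width = len(attr)   # stand-in
        return width

c = WidthCache()
print(c.get_width('a.b.c'))   # 5, computed once
print(c.get_width('a.b.c'))   # 5, served from the cache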
Example No. 11
    def get_flattened_size(self):
        """Return the size of a flattened float array containing
        all values in the vartree that are flattenable to float
        arrays.  Any values not flattenable to float arrays will
        raise a NoFlatError.
        """
        size = 0
        for key in self.list_vars():
            size += flattened_size(key, getattr(self, key), scope=self)
        return size
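A toy illustration of the traversal: the total flattened size is the sum of each member's flattened width. ToyTree and its values are invented:

from numpy import asarray

class ToyTree(object):
    def __init__(self, **vals):
        self._vals = vals
    def list_vars(self):
        return sorted(self._vals)
    def get_flattened_size(self):
        # Each variable contributes its flattened float width.
        return sum(asarray(self._vals[k], dtype=float).size
                   for k in self.list_vars())

vt = ToyTree(a=1.0, b=[1.0, 2.0], c=[[1.0, 2.0], [3.0, 4.0]])
print(vt.get_flattened_size())   # 1 + 2 + 4 = 7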
Example No. 12
def get_bounds(obj, input_keys, output_keys):
    """ Returns a pair of dictionaries that contain the stop and end index
    for each input and output in a pair of lists.
    """

    ibounds = {}
    nvar = 0
    if hasattr(obj, 'parent'):
        scope = obj.parent
    else:
        scope = None  # Pseudoassys

    for key in input_keys:

        # For parameter group, all should be equal so just get first.
        if not isinstance(key, tuple):
            key = [key]

        val = obj.get(key[0])

        width = flattened_size('.'.join((obj.name, key[0])), val,
                               scope=scope)
        shape = val.shape if hasattr(val, 'shape') else None
        for item in key:
            ibounds[item] = (nvar, nvar+width, shape)
        nvar += width

    obounds = {}
    nvar = 0
    for key in output_keys:
        val = obj.get(key)
        width = flattened_size('.'.join((obj.name, key)), val)
        shape = val.shape if hasattr(val, 'shape') else None
        obounds[key] = (nvar, nvar+width, shape)
        nvar += width

    return ibounds, obounds
Example No. 13
    def __init__(self, pa):
        """ Performs finite difference on the components in a given
        pseudo_assembly. """

        self.inputs = pa.inputs
        self.outputs = pa.outputs
        self.in_bounds = {}
        self.out_bounds = {}
        self.pa = pa
        self.scope = pa.wflow.scope

        options = pa.wflow._parent.gradient_options

        self.fd_step = options.fd_step*ones((len(self.inputs)))
        self.form = options.fd_form
        self.form_custom = {}
        self.step_type = options.fd_step_type
        self.step_type_custom = {}
        self.relative_threshold = 1.0e-4

        driver = self.pa.wflow._parent
        driver_params = []
        driver_targets = []
        if hasattr(driver, 'get_parameters'):
            driver_params = self.pa.wflow._parent.get_parameters()
            driver_targets = driver.list_param_targets()
        in_size = 0
        for j, srcs in enumerate(self.inputs):

            low = high = None

            # Support for parameter groups
            if isinstance(srcs, basestring):
                srcs = [srcs]

            # Local stepsize support
            meta = self.scope.get_metadata(self.scope._depgraph.base_var(srcs[0]))

            if 'fd_step' in meta:
                self.fd_step[j] = meta['fd_step']

            if 'low' in meta:
                low = meta['low']
            if 'high' in meta:
                high = meta['high']

            if srcs[0] in driver_targets:
                if srcs[0] in driver_params:
                    param = driver_params[srcs[0]]
                    if param.fd_step is not None:
                        self.fd_step[j] = param.fd_step
                    if param.low is not None:
                        low = param.low
                    if param.high is not None:
                        high = param.high
                else:
                    # have to check through all the param groups
                    for param_group in driver_params:
                        is_fd_step_not_set = is_low_not_set = is_high_not_set = True
                        if not isinstance(param_group, str) and \
                           srcs[0] in param_group:
                            param = driver_params[param_group]
                            if is_fd_step_not_set and param.fd_step is not None:
                                self.fd_step[j] = param.fd_step
                                is_fd_step_not_set = False
                            if is_low_not_set and param.low is not None:
                                low = param.low
                                is_low_not_set = False
                            if is_high_not_set and param.high is not None:
                                high = param.high
                                is_high_not_set = False

            if 'fd_step_type' in meta:
                self.step_type_custom[j] = meta['fd_step_type']
                step_type = self.step_type_custom[j]
            else:
                step_type = self.step_type

            # Bounds scaled
            if step_type == 'bounds_scaled':
                if low is None and high is None:
                    raise RuntimeError("For variable '%s', a finite "
                                       "difference step type of "
                                       "bounds_scaled is used but required low "
                                       "and high values are not set" % srcs[0])
                if low == -float_info.max:
                    raise RuntimeError("For variable '%s', a finite "
                                       "difference step type of "
                                       "bounds_scaled is used but required "
                                       "low value is not set" % srcs[0])
                if high == float_info.max:
                    raise RuntimeError("For variable '%s', a finite "
                                       "difference step type of "
                                       "bounds_scaled is used but required "
                                       "high value is not set" % srcs[0])
                self.fd_step[j] = (high - low) * self.fd_step[j]

            if 'fd_form' in meta:
                self.form_custom[j] = meta['fd_form']

            val = self.scope.get(srcs[0])
            width = flattened_size(srcs[0], val, self.scope)
            for src in srcs:
                self.in_bounds[src] = (in_size, in_size+width)
            in_size += width

        out_size = 0
        for src in self.outputs:
            val = self.scope.get(src)
            width = flattened_size(src, val)
            self.out_bounds[src] = (out_size, out_size+width)
            out_size += width

        self.J = zeros((out_size, in_size))
        self.y_base = zeros((out_size,))
        self.x = zeros((in_size,))
        self.y = zeros((out_size,))
        self.y2 = zeros((out_size,))
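To make the bounds_scaled branch concrete: the user-supplied fd_step is treated as a fraction of the variable's range, so the absolute step is (high - low) * fd_step. A quick check with made-up numbers:

fd_step, low, high = 1.0e-3, 0.0, 200.0
print((high - low) * fd_step)   # absolute step of 0.2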
Example No. 14
    def calc_gradient(self,
                      inputs=None,
                      outputs=None,
                      upscope=False,
                      mode='auto'):
        """Returns the gradient of the passed outputs with respect to
        all passed inputs.

        inputs: list of strings or tuples of strings
            List of input variables that we are taking derivatives with respect
            to. They must be within this workflow's scope. If no inputs are
            given, the parent driver's parameters are used. A tuple can be used
            to link inputs together.

        outputs: list of strings
            List of output variables that we are taking derivatives of.
            They must be within this workflow's scope. If no outputs are
            given, the parent driver's objectives and constraints are used.

        upscope: boolean
            This is set to True when our workflow is part of a subassembly that
            lies in a workflow that needs a gradient with respect to variables
            outside of this workflow, so that the caches can be reset.

        mode: string
            Set to 'forward' for forward mode, 'adjoint' for adjoint mode,
            'fd' for full-model finite difference (with fake finite
            difference disabled), or 'auto' to let OpenMDAO determine the
            correct mode.
        """

        self._J_cache = {}

        # User may request full-model finite difference.
        if self._parent.gradient_options.force_fd:
            mode = 'fd'

        # This function can be called from a parent driver's workflow for
        # assembly recursion. We have to clear our cache if that happens.
        # We also have to clear it next time we arrive back in our workflow.
        if upscope or self._upscoped:
            self._derivative_graph = None
            self._edges = None
            self._comp_edges = None

            self._upscoped = upscope

        dgraph = self.derivative_graph(inputs, outputs, fd=(mode == 'fd'))

        if 'mapped_inputs' in dgraph.graph:
            inputs = dgraph.graph['mapped_inputs']
            outputs = dgraph.graph['mapped_outputs']
        else:
            inputs = dgraph.graph['inputs']
            outputs = dgraph.graph['outputs']

        n_edge = self.initialize_residual()

        # cache Jacobians for comps that return them from provideJ

        # Size our Jacobian
        num_in = 0
        for item in inputs:

            # For parameter groups, only size the first
            if not isinstance(item, basestring):
                item = item[0]

            try:
                i1, i2 = self.get_bounds(item)
                if isinstance(i1, list):
                    num_in += len(i1)
                else:
                    num_in += i2 - i1
            except KeyError:
                val = self.scope.get(item)
                num_in += flattened_size(item, val, self.scope)

        num_out = 0
        for item in outputs:
            try:
                i1, i2 = self.get_bounds(item)
                if isinstance(i1, list):
                    num_out += len(i1)
                else:
                    num_out += i2 - i1
            except KeyError:
                val = self.scope.get(item)
                num_out += flattened_size(item, val, self.scope)

        shape = (num_out, num_in)

        # Auto-determine which mode to use based on Jacobian shape.
        if mode == 'auto':
            # TODO - additional determination based on presence of
            # apply_derivT

            if num_in > num_out:
                mode = 'adjoint'
            else:
                mode = 'forward'

        if mode == 'adjoint':
            J = calc_gradient_adjoint(self, inputs, outputs, n_edge, shape)
        elif mode in ['forward', 'fd']:
            J = calc_gradient(self, inputs, outputs, n_edge, shape)
        else:
            msg = "In calc_gradient, mode must be 'forward', 'adjoint', " + \
                  "'auto', or 'fd', but a value of %s was given." % mode
            self.scope.raise_exception(msg, RuntimeError)

        # Finally, we need to untransform the jacobian if any parameters have
        # scalers.
        #print 'edges:', self._edges
        if not hasattr(self._parent, 'get_parameters'):
            return J

        params = self._parent.get_parameters()

        if len(params) == 0:
            return J

        i = 0
        for group in inputs:

            if isinstance(group, str):
                group = [group]

            name = group[0]
            if len(group) > 1:
                pname = tuple([from_PA_var(aname) for aname in group])
            else:
                pname = from_PA_var(name)

            try:
                i1, i2 = self.get_bounds(name)
            except KeyError:
                continue

            if isinstance(i1, list):
                width = len(i1)
            else:
                width = i2 - i1

            if pname in params:
                scaler = params[pname].scaler
                if scaler != 1.0:
                    J[:, i:i + width] = J[:, i:i + width] * scaler

            i = i + width
        #print J
        return J
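The 'auto' branch above picks a mode from the Jacobian shape: forward mode costs roughly one linear solve per input, adjoint mode one per output, so the cheaper direction wins. A sketch of that decision:

def choose_mode(num_in, num_out):
    # One solve per input (forward) vs. one per output (adjoint).
    return 'adjoint' if num_in > num_out else 'forward'

print(choose_mode(100, 3))   # 'adjoint' -> 3 solves instead of 100
print(choose_mode(3, 100))   # 'forward'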
Example No. 15
    def initialize_residual(self):
        """Creates the array that stores the residual. Also returns the
        number of edges.
        """
        dgraph = self.derivative_graph()
        if 'mapped_inputs' in dgraph.graph:
            inputs = dgraph.graph['mapped_inputs']
        else:
            inputs = dgraph.graph['inputs']

        basevars = set()
        edges = self.edge_list()
        implicit_edges = self.get_implicit_info()
        sortedkeys = sorted(implicit_edges)
        sortedkeys.extend(sorted(edges.keys()))

        nEdge = 0
        for src in sortedkeys:

            if src in implicit_edges:
                targets = implicit_edges[src]
                is_implicit = True
            else:
                targets = edges[src]
                is_implicit = False

            if isinstance(targets, str):
                targets = [targets]

            # Implicit source edges are tuples.
            if is_implicit:
                impli_edge = nEdge
                for resid in src:
                    unmap_src = from_PA_var(resid)

                    val = self.scope.get(unmap_src)
                    width = flattened_size(unmap_src, val, self.scope)

                    if isinstance(val, ndarray):
                        shape = val.shape
                    else:
                        shape = 1

                    bound = (impli_edge, impli_edge + width)
                    self.set_bounds(resid, bound)
                    basevars.add(resid)
                    impli_edge += width

            # Regular components
            else:

                # Only need to grab the source (or first target for param) to
                # figure out the size for the residual vector
                measure_src = src
                if '@in' in src:
                    idx = int(src[3:].split('[')[0])
                    inp = inputs[idx]
                    if not isinstance(inp, basestring):
                        inp = inp[0]
                    if inp in dgraph:
                        measure_src = inp
                    else:
                        measure_src = targets[0]
                elif src == '@fake':
                    for t in targets:
                        if not t.startswith('@'):
                            measure_src = t
                            break
                    else:
                        raise RuntimeError("malformed graph!")

                # Find our width, etc.
                unmap_src = from_PA_var(measure_src)
                val = self.scope.get(unmap_src)
                width = flattened_size(unmap_src, val, self.scope)
                if isinstance(val, ndarray):
                    shape = val.shape
                else:
                    shape = 1

                # Special poke for boundary node
                if is_boundary_node(dgraph, measure_src) or \
                   is_boundary_node(dgraph, dgraph.base_var(measure_src)):
                    bound = (nEdge, nEdge + width)
                    self.set_bounds(measure_src, bound)

                src_noidx = src.split('[', 1)[0]

                # Poke our source data

                # Array slice of src that is already allocated
                if '[' in src and src_noidx in basevars:
                    _, _, idx = src.partition('[')
                    basebound = self.get_bounds(src_noidx)
                    if '@in' not in src_noidx:
                        unmap_src = from_PA_var(src_noidx)
                        val = self.scope.get(unmap_src)
                        shape = val.shape
                    offset = basebound[0]
                    istring, ix = flatten_slice(idx,
                                                shape,
                                                offset=offset,
                                                name='ix')
                    bound = (istring, ix)
                    # Already allocated
                    width = 0

                # Input-input connection to implicit state
                elif src_noidx in basevars:
                    bound = self.get_bounds(src_noidx)
                    width = 0

                # Normal src
                else:
                    bound = (nEdge, nEdge + width)

                self.set_bounds(src, bound)
                basevars.add(src)

            # Poke our target data
            impli_edge = nEdge
            for target in targets:

                # Handle States in implicit comps
                if is_implicit:

                    if isinstance(target, str):
                        target = [target]

                    unmap_targ = from_PA_var(target[0])
                    val = self.scope.get(unmap_targ)
                    imp_width = flattened_size(unmap_targ, val, self.scope)
                    if isinstance(val, ndarray):
                        shape = val.shape
                    else:
                        shape = 1

                    for itarget in target:
                        bound = (impli_edge, impli_edge + imp_width)
                        self.set_bounds(itarget, bound)
                        basevars.add(itarget)

                    impli_edge += imp_width
                    width = impli_edge - nEdge

                elif not target.startswith('@'):
                    self.set_bounds(target, bound)

            #print input_src, src, target, bound,
            nEdge += width
            impli_edge = nEdge

        # Initialize the residual vector on the first time through, and also
        # if for some reason the number of edges has changed.
        if self.res is None or nEdge != self.res.shape[0]:
            self.res = zeros((nEdge, 1))

        return nEdge
Example No. 16
    def __init__(self, system, inputs, outputs, return_format='array'):
        """ Performs finite difference on the components in a given
        System. """

        self.inputs = inputs
        self.outputs = outputs
        self.in_bounds = {}
        self.system = system
        self.scope = system.scope
        self.return_format = return_format

        options = system.options
        driver = options.parent

        self.fd_step = options.fd_step*ones((len(self.inputs)))
        self.low = [None] * len(self.inputs)
        self.high = [None] * len(self.inputs)

        self.form = options.fd_form
        self.form_custom = {}
        self.step_type = options.fd_step_type
        self.step_type_custom = {}
        self.relative_threshold = 1.0e-4

        dgraph = self.scope._depgraph
        driver_params = []
        driver_targets = []

        if hasattr(driver, 'get_parameters'):
            driver_params = driver.get_parameters()
            driver_targets = driver.list_param_targets()

        in_size = 0
        for j, srcs in enumerate(self.inputs):

            low = high = None

            # Support for parameter groups
            if isinstance(srcs, basestring):
                srcs = [srcs]

            # Local stepsize support
            meta = self.scope.get_metadata(base_var(dgraph, srcs[0]))

            if 'fd_step' in meta:
                self.fd_step[j] = meta['fd_step']

            if 'low' in meta:
                low = meta['low']
            if 'high' in meta:
                high = meta['high']

            # Settings in the add_parameter call trump all others
            param_srcs = [item for item in srcs if item in driver_targets]
            if param_srcs:
                if param_srcs[0] in driver_params:
                    param = driver_params[param_srcs[0]]
                    if param.fd_step is not None:
                        self.fd_step[j] = param.fd_step
                    if param.low is not None:
                        low = param.low
                    if param.high is not None:
                        high = param.high
                else:
                    # have to check through all the param groups
                    for param_group in driver_params:
                        is_fd_step_not_set = is_low_not_set = \
                                             is_high_not_set = True
                        if not isinstance(param_group, str) and \
                           param_srcs[0] in param_group:
                            param = driver_params[param_group]
                            if is_fd_step_not_set and param.fd_step is not None:
                                self.fd_step[j] = param.fd_step
                                is_fd_step_not_set = False
                            if is_low_not_set and param.low is not None:
                                low = param.low
                                is_low_not_set = False
                            if is_high_not_set and param.high is not None:
                                high = param.high
                                is_high_not_set = False

            if 'fd_step_type' in meta:
                self.step_type_custom[j] = meta['fd_step_type']
                step_type = self.step_type_custom[j]
            else:
                step_type = self.step_type

            # Bounds scaled
            if step_type == 'bounds_scaled':
                if low is None and high is None:
                    raise RuntimeError("For variable '%s', a finite "
                                       "difference step type of bounds_scaled "
                                       "is used but required low and "
                                       "high values are not set" % srcs[0])
                if low == -float_info.max:
                    raise RuntimeError("For variable '%s', a finite "
                                       "difference step type of "
                                       "bounds_scaled is used but required "
                                       "low value is not set" % srcs[0])
                if high == float_info.max:
                    raise RuntimeError("For variable '%s', a finite "
                                       "difference step type of "
                                       "bounds_scaled is used but required "
                                       "high value is not set" % srcs[0])
                self.fd_step[j] = (high - low) * self.fd_step[j]

            if 'fd_form' in meta:
                self.form_custom[j] = meta['fd_form']

            val = self.scope.get(srcs[0])
            width = flattened_size(srcs[0], val, self.scope)

            for src in srcs:
                self.in_bounds[src] = (in_size, in_size+width)
            in_size += width

            self.high[j] = high
            self.low[j] = low

        out_size = 0
        for src in self.outputs:
            val = self.scope.get(src)
            width = flattened_size(src, val)
            out_size += width

        # Size our Jacobian
        if return_format == 'dict':
            self.J = {}
            for okey in outputs:

                self.J[okey] = {}
                for ikey in inputs:
                    if isinstance(ikey, tuple):
                        ikey = ikey[0]

                    # If output not on this process, just allocate a dummy
                    # array
                    if MPI and okey not in self.system.vec['u']:
                        osize = 0
                    else:
                        osize = self.system.vec['u'][okey].size

                    isize = self.system.vec['p'][ikey].size

                    self.J[okey][ikey] = zeros((osize, isize))
        else:
            self.J = zeros((out_size, in_size))

        self.y_base = zeros((out_size,))
        self.y = zeros((out_size,))
        self.y2 = zeros((out_size,))
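A sketch of the nested-dict Jacobian layout allocated by the return_format == 'dict' branch; the keys and sizes are invented:

from numpy import zeros

outputs, inputs = ['f', 'g'], ['x', 'y']
sizes = {'f': 1, 'g': 2, 'x': 3, 'y': 1}

J = {}
for okey in outputs:
    J[okey] = {}
    for ikey in inputs:
        J[okey][ikey] = zeros((sizes[okey], sizes[ikey]))

print(J['g']['x'].shape)     # (2, 3): the d(g)/d(x) block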
Example No. 17
def calc_gradient(wflow, inputs, outputs, n_edge, shape):
    """Returns the gradient of the passed outputs with respect to
    all passed inputs.
    """

    # Size the problem
    A = LinearOperator((n_edge, n_edge), matvec=wflow.matvecFWD, dtype=float)

    J = zeros(shape)

    # Each comp calculates its own derivatives at the current
    # point. (i.e., linearizes)
    wflow.calc_derivatives(first=True)

    dgraph = wflow._derivative_graph
    options = wflow._parent.gradient_options

    # Forward mode, solve linear system for each parameter
    j = 0
    for param in inputs:

        if isinstance(param, tuple):

            # You can ask for derivatives of broadcast inputs in cases
            # where some of the inputs aren't in the relevance graph.
            # Find the one that is.
            for bcast_param in param:
                if bcast_param in dgraph and \
                   'bounds' in dgraph.node[bcast_param]:
                    param = bcast_param
                    break
            else:
                param = param[0]
                #raise RuntimeError("didn't find any of '%s' in derivative graph for '%s'" %
                #(param, wflow._parent.get_pathname()))
        try:
            i1, i2 = wflow.get_bounds(param)
        except KeyError:

            # If you end up here, it is usually because you have a
            # tuple of broadcast inputs containing only non-relevant
            # variables. Derivative is zero, so take one and increment
            # by its width.

            # TODO - We need to cache these when we remove
            # boundcaching from the graph
            val = wflow.scope.get(param)
            j += flattened_size(param, val, wflow.scope)
            continue

        if isinstance(i1, list):
            in_range = i1
        else:
            in_range = range(i1, i2)

        for irhs in in_range:

            RHS = zeros((n_edge, 1))
            RHS[irhs, 0] = 1.0

            # Call GMRES to solve the linear system
            dx, info = gmres(A,
                             RHS,
                             tol=options.gmres_tolerance,
                             maxiter=options.gmres_maxiter)
            if info > 0:
                msg = "ERROR in calc_gradient in '%s': gmres failed to converge " \
                      "after %d iterations for parameter '%s' at index %d"
                logger.error(msg %
                             (wflow._parent.get_pathname(), info, param, irhs))
            elif info < 0:
                msg = "ERROR in calc_gradient in '%s': gmres failed " \
                      "for parameter '%s' at index %d"
                logger.error(msg % (wflow._parent.get_pathname(), param, irhs))

            i = 0
            for item in outputs:
                try:
                    k1, k2 = wflow.get_bounds(item)
                except KeyError:
                    continue

                if isinstance(k1, list):
                    J[i:i + (len(k1)), j] = dx[k1]
                    i += len(k1)
                else:
                    J[i:i + (k2 - k1), j] = dx[k1:k2]
                    i += k2 - k1

            j += 1

    #print inputs, '\n', outputs, '\n', J
    return J
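A compact sketch of the forward-mode pattern above: the linearized model is available only as a matrix-vector product, so each Jacobian column comes from solving A*dx = e_i with GMRES. The 3x3 operator here is a stand-in:

from numpy import eye, zeros
from scipy.sparse.linalg import LinearOperator, gmres

A_dense = 2.0 * eye(3)                  # hypothetical linearized model
A = LinearOperator((3, 3), matvec=A_dense.dot, dtype=float)

J = zeros((3, 3))
for irhs in range(3):
    RHS = zeros((3,))
    RHS[irhs] = 1.0                     # unit vector e_irhs
    dx, info = gmres(A, RHS)            # info == 0 means converged
    J[:, irhs] = dx

print(J)                                # 0.5 * I, the inverse of A_dense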
Example No. 19
    def __init__(self, pa):
        """ Performs finite difference on the components in a given
        pseudo_assembly. """

        self.inputs = pa.inputs
        self.outputs = pa.outputs
        self.in_bounds = {}
        self.out_bounds = {}
        self.pa = pa
        self.scope = pa.wflow.scope

        options = pa.wflow._parent.gradient_options

        self.fd_step = options.fd_step*ones((len(self.inputs)))
        self.form = options.fd_form
        self.form_custom = {}
        self.step_type = options.fd_step_type
        self.step_type_custom = {}
        self.relative_threshold = 1.0e-4

        driver = self.pa.wflow._parent
        driver_params = []
        driver_targets = []
        if hasattr(driver, 'get_parameters'):
            driver_params = self.pa.wflow._parent.get_parameters()
            driver_targets = driver.list_param_targets()
        in_size = 0
        for j, srcs in enumerate(self.inputs):

            # Support for parameter groups
            if isinstance(srcs, basestring):
                srcs = [srcs]

            # Local stepsize support
            meta = self.scope.get_metadata(self.scope._depgraph.base_var(srcs[0]))

            if 'fd_step' in meta:
                self.fd_step[j] = meta['fd_step']

            if srcs[0] in driver_targets:
                if srcs[0] in driver_params:
                    param = driver_params[srcs[0]]
                    if param.fd_step is not None:
                        self.fd_step[j] = param.fd_step
                else:
                    # have to check through all the param groups
                    for param_group in driver_params:
                        if not isinstance(param_group, str) and \
                           srcs[0] in param_group:
                            param = driver_params[param_group]
                            if param.fd_step is not None:
                                self.fd_step[j] = param.fd_step
                                break

            if 'fd_step_type' in meta:
                self.step_type_custom[j] = meta['fd_step_type']

            if 'fd_form' in meta:
                self.form_custom[j] = meta['fd_form']

            val = self.scope.get(srcs[0])
            width = flattened_size(srcs[0], val, self.scope)
            for src in srcs:
                self.in_bounds[src] = (in_size, in_size+width)
            in_size += width

        out_size = 0
        for src in self.outputs:
            val = self.scope.get(src)
            width = flattened_size(src, val)
            self.out_bounds[src] = (out_size, out_size+width)
            out_size += width

        self.J = zeros((out_size, in_size))
        self.y_base = zeros((out_size,))
        self.x = zeros((in_size,))
        self.y = zeros((out_size,))
        self.y2 = zeros((out_size,))