Example #1
    def get_width(self, attr):
        """Return the flattened width of the value of the given attribute."""
        width = self._width_cache.get(attr, _missing)
        if width is _missing:
            param = from_PA_var(attr)
            self._width_cache[attr] = width = flattened_size(
                param, self.scope.get(param), self.scope)
        return width
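Example #1 memoizes the flattened width of an attribute behind a sentinel check so the size is only computed once per attribute. A minimal standalone sketch of the same caching pattern, assuming a plain dict scope and NumPy; flat_size is an illustrative stand-in, not the OpenMDAO flattened_size helper:

import numpy as np

_missing = object()   # unique sentinel, distinct from any real cached width
_width_cache = {}

def flat_size(val):
    # Illustrative stand-in for flattened_size(): number of scalar entries.
    return np.asarray(val).size

def get_width(scope, attr):
    width = _width_cache.get(attr, _missing)
    if width is _missing:
        _width_cache[attr] = width = flat_size(scope[attr])
    return width

# Repeated calls hit the cache instead of re-flattening the value.
scope = {'x': np.ones((3, 2)), 'y': 4.0}
assert get_width(scope, 'x') == 6
assert get_width(scope, 'y') == 1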
Example #2
    def get_dependents(self, fixed_point=False):
        """Returns a list of current values of the dependents. This includes
        both constraints and severed sources.

        fixed_point: bool
            Set to True if we are doing fixed-point iteration instead of a more
            general solve. In such a case, we need to swap the order of the
            constraints to match the parameter order. We also may need to swap
            signs on the constraints.
        """

        deps = self._parent.eval_eq_constraints(self.scope)

        # Reorder for fixed point
        if fixed_point is True:
            newdeps = zeros(len(deps))
            eqcons = self._parent.get_eq_constraints()
            old_j = 0
            for key, value in eqcons.iteritems():
                con_targets = value.get_referenced_varpaths()
                new_j = 0
                for params in self._parent.list_param_group_targets():
                    if params[0] == value.rhs.text:
                        newdeps[new_j] = deps[old_j]
                    elif params[0] == value.lhs.text:
                        newdeps[new_j] = -deps[old_j]
                    new_j += 1
                old_j += 1
            deps = newdeps

        sev_deps = []
        for src, target in self._severed_edges:

            if not isinstance(target, str):
                target = target[0]

            target = from_PA_var(target)
            src = from_PA_var(src)
            src_val = self.scope.get(src)
            targ_val = self.scope.get(target)
            res = flattened_value(src, src_val) - flattened_value(
                target, targ_val)

            sev_deps.extend(res)

        return hstack((deps, sev_deps))
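The fixed-point branch above moves each constraint residual to the position of the parameter it feeds and flips the sign when the parameter appears on the constraint's left-hand side. A rough standalone illustration of that reordering; the Con tuple and the toy data are hypothetical, not OpenMDAO structures:

import numpy as np
from collections import namedtuple

# Hypothetical stand-in for an equality constraint 'lhs = rhs'.
Con = namedtuple('Con', ['lhs', 'rhs'])

def reorder_for_fixed_point(deps, constraints, param_targets):
    # Reorder constraint residuals to match parameter order, flipping the
    # sign when the parameter is the constraint's lhs rather than its rhs.
    newdeps = np.zeros(len(deps))
    for old_j, con in enumerate(constraints):
        for new_j, param in enumerate(param_targets):
            if param == con.rhs:
                newdeps[new_j] = deps[old_j]
            elif param == con.lhs:
                newdeps[new_j] = -deps[old_j]
    return newdeps

deps = np.array([1.0, 2.0])
cons = [Con(lhs='a.x', rhs='b.y'), Con(lhs='c.z', rhs='d.w')]
params = ['c.z', 'b.y']   # parameter order differs from constraint order
print(reorder_for_fixed_point(deps, cons, params))   # [-2.  1.]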
Example #3
    def get_dependents(self, fixed_point=False):
        """Returns a list of current values of the dependents. This includes
        both constraints and severed sources.

        fixed_point: bool
            Set to True if we are doing fixed-point iteration instead of a more
            general solve. In such a case, we need to swap the order of the
            constraints to match the parameter order. We also may need to swap
            signs on the constraints.
        """

        deps = self._parent.eval_eq_constraints(self.scope)

        # Reorder for fixed point
        if fixed_point == True:
            newdeps = zeros(len(deps))
            eqcons = self._parent.get_eq_constraints()
            old_j = 0
            for key, value in eqcons.iteritems():
                con_targets = value.get_referenced_varpaths()
                new_j = 0
                for params in self._parent.list_param_group_targets():
                    if params[0] == value.rhs.text:
                        newdeps[new_j] = deps[old_j]
                    elif params[0] == value.lhs.text:
                        newdeps[new_j] = -deps[old_j]
                    new_j += 1
                old_j += 1
            deps = newdeps

        sev_deps = []
        for src, target in self._severed_edges:

            if not isinstance(target, str):
                target = target[0]

            target = from_PA_var(target)
            src = from_PA_var(src)
            src_val = self.scope.get(src)
            targ_val = self.scope.get(target)
            res = flattened_value(src, src_val) - flattened_value(target, targ_val)

            sev_deps.extend(res)

        return hstack((deps, sev_deps))
Example #4
    def get_width(self, attr):
        """Return the flattened width of the value of the given attribute."""
        width = self._width_cache.get(attr, _missing)
        if width is _missing:
            param = from_PA_var(attr)
            self._width_cache[attr] = width = flattened_size(param,
                                                             self.scope.get(param),
                                                             self.scope)
        return width
Example #5
    def set_independents(self, val):
        """Sets all dependent variables to the values in the input array
        `val`. This includes both parameters and severed targets.
        """

        nparam = self._parent.total_parameters()
        if nparam > 0:
            self._parent.set_parameters(val[:nparam].flatten())

        if len(self._severed_edges) > 0:
            i = nparam
            for src, targets in self._mapped_severed_edges:
                if isinstance(targets, str):
                    targets = [targets]

                i1, i2 = self.get_bounds(src)
                if isinstance(i1, list):
                    width = len(i1)
                else:
                    width = i2-i1

                i1 = i
                i2 = i + width

                for target in targets:

                    target = from_PA_var(target)
                    old_val = self.scope.get(target)

                    if isinstance(old_val, float):
                        new_val = float(val[i1:i2])
                    elif isinstance(old_val, ndarray):
                        shape = old_val.shape
                        if len(shape) > 1:
                            new_val = val[i1:i2].copy()
                            new_val = new_val.reshape(shape)
                        else:
                            new_val = val[i1:i2].copy()
                    elif isinstance(old_val, VariableTree):
                        new_val = old_val.copy()
                        self._vtree_set(target, new_val, val[i1:i2], i1)
                    else:
                        msg = "Variable %s is of type %s." % (target, type(old_val)) + \
                              " This type is not supported by the MDA Solver."
                        self.scope.raise_exception(msg, RuntimeError)

                    i += width

                    # Poke new value into the input end of the edge.
                    self.scope.set(target, new_val, force=True)

                    # Prevent OpenMDAO from stomping on our poked input.
                    self.scope.set_valid([target.split('[',1)[0]], True)
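The loop over targets above walks a flat vector with a cursor and converts each slice back to the shape of the value it replaces. A compact sketch of just that unpacking step, covering only the float and ndarray cases (the VariableTree branch is omitted); names and data are made up:

import numpy as np

def unpack(flat, old_vals):
    # Split a flat vector into new values shaped like the entries of old_vals.
    out, i = {}, 0
    for name, old in old_vals.items():
        width = 1 if isinstance(old, float) else old.size
        chunk = flat[i:i + width]
        out[name] = float(chunk[0]) if isinstance(old, float) \
            else chunk.copy().reshape(old.shape)
        i += width
    return out

vals = {'a': 0.0, 'b': np.zeros((2, 2))}
new = unpack(np.arange(5.0), vals)
# new['a'] == 0.0, new['b'] == [[1., 2.], [3., 4.]]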
Example #6
    def get_dependents(self):
        """Returns a list of current values of the dependents. This includes
        both constraints and severed sources.
        """

        deps = self._parent.eval_eq_constraints(self.scope)
        sev_deps = []
        for src, target in self._severed_edges:

            if not isinstance(target, str):
                target = target[0]

            target = from_PA_var(target)
            src = from_PA_var(src)
            src_val = self.scope.get(src)
            targ_val = self.scope.get(target)
            res = flattened_value(src, src_val) - flattened_value(target, targ_val)

            sev_deps.extend(res)

        return hstack((deps, sev_deps))
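For each severed connection, the residual is simply the flattened source value minus the flattened target value. A hedged standalone version using np.ravel in place of flattened_value, with made-up variable names:

import numpy as np

def severed_residuals(scope, severed_edges):
    # Flattened source minus flattened target for each severed connection.
    res = []
    for src, target in severed_edges:
        diff = np.ravel(scope[src]) - np.ravel(scope[target])
        res.extend(diff)
    return np.array(res)

scope = {'comp1.y': np.array([1.0, 2.0]), 'comp2.x': np.array([0.5, 2.5])}
print(severed_residuals(scope, [('comp1.y', 'comp2.x')]))   # [ 0.5 -0.5]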
Example #7
    def set_independents(self, val):
        """Sets all dependent variables to the values in the input array
        `val`. This includes both parameters and severed targets.
        """
        bounds = self._bounds_cache
        nparam = self._parent.total_parameters()
        if nparam > 0:
            self._parent.set_parameters(val[:nparam].flatten())

        if len(self._severed_edges) > 0:
            i = nparam
            for src, targets in self._mapped_severed_edges:
                if isinstance(targets, str):
                    targets = [targets]

                i1, i2 = bounds[src]
                if isinstance(i1, list):
                    width = len(i1)
                else:
                    width = i2 - i1

                i1 = i
                i2 = i + width

                for target in targets:

                    target = from_PA_var(target)
                    old_val = self.scope.get(target)

                    if isinstance(old_val, float):
                        new_val = float(val[i1:i2])
                    elif isinstance(old_val, ndarray):
                        shape = old_val.shape
                        if len(shape) > 1:
                            new_val = val[i1:i2].copy()
                            new_val = new_val.reshape(shape)
                        else:
                            new_val = val[i1:i2].copy()
                    elif isinstance(old_val, VariableTree):
                        new_val = old_val.copy()
                        self._vtree_set(target, new_val, val[i1:i2], i1)
                    else:
                        msg = "Variable %s is of type %s." % (target, type(old_val)) + \
                              " This type is not supported by the MDA Solver."
                        self.scope.raise_exception(msg, RuntimeError)

                    i += width

                    # Poke new value into the input end of the edge.
                    self.scope.set(target, new_val, force=True)

                    # Prevent OpenMDAO from stomping on our poked input.
                    self.scope.set_valid([target.split('[', 1)[0]], True)
Example #8
    def get_independents(self):
        """Returns a list of current values of the dependents. This includes
        both parameters and severed targets.
        """

        indeps = self._parent.eval_parameters(self.scope)
        sev_indeps = []
        for _, target in self._severed_edges:

            if not isinstance(target, str):
                target = target[0]

            target = from_PA_var(target)
            old_val = self.scope.get(target)

            sev_indeps.extend(flattened_value(target, old_val))

        return hstack((indeps, sev_indeps))
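Example #8 builds the independent vector by stacking the flattened parameter values with the current values of all severed targets. Roughly, with made-up names:

import numpy as np

def get_independents(param_vals, scope, severed_edges):
    # Stack parameter values with the flattened values of severed targets.
    sev = [np.ravel(scope[target]) for _, target in severed_edges]
    return np.hstack([np.ravel(param_vals)] + sev)

scope = {'c.x': np.array([3.0, 4.0])}
print(get_independents(np.array([1.0, 2.0]), scope, [('a.y', 'c.x')]))
# [1. 2. 3. 4.]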
Example #9
    def get_independents(self):
        """Returns a list of current values of the dependents. This includes
        both parameters and severed targets.
        """

        indeps = self.parent.eval_parameters(self.scope)
        sev_indeps = []
        for _, target in self._severed_edges:

            if not isinstance(target, str):
                target = target[0]

            target = from_PA_var(target)
            old_val = self.scope.get(target)

            sev_indeps.extend(flattened_value(target, old_val))

        return hstack((indeps, sev_indeps))
Example #10
    def calc_gradient(self,
                      inputs=None,
                      outputs=None,
                      upscope=False,
                      mode='auto'):
        """Returns the gradient of the passed outputs with respect to
        all passed inputs.

        inputs: list of strings or tuples of strings
            List of input variables that we are taking derivatives with respect
            to. They must be within this workflow's scope. If no inputs are
            given, the parent driver's parameters are used. A tuple can be used
            to link inputs together.

        outputs: list of strings
            List of output variables that we are taking derivatives of.
            They must be within this workflow's scope. If no outputs are
            given, the parent driver's objectives and constraints are used.

        upscope: boolean
            This is set to True when our workflow is part of a subassembly that
            lies in a workflow that needs a gradient with respect to variables
            outside of this workflow, so that the caches can be reset.

        mode: string
            Set to 'forward' for forward mode, 'adjoint' for adjoint mode,
            'fd' for full-model finite difference (with fake finite
            difference disabled), or 'auto' to let OpenMDAO determine the
            correct mode.
        """

        self._J_cache = {}

        # User may request full-model finite difference.
        if self._parent.gradient_options.force_fd == True:
            mode = 'fd'

        # This function can be called from a parent driver's workflow for
        # assembly recursion. We have to clear our cache if that happens.
        # We also have to clear it next time we arrive back in our workflow.
        if upscope or self._upscoped:
            self._derivative_graph = None
            self._edges = None
            self._comp_edges = None

            self._upscoped = upscope

        dgraph = self.derivative_graph(inputs, outputs, fd=(mode == 'fd'))

        if 'mapped_inputs' in dgraph.graph:
            inputs = dgraph.graph['mapped_inputs']
            outputs = dgraph.graph['mapped_outputs']
        else:
            inputs = dgraph.graph['inputs']
            outputs = dgraph.graph['outputs']

        n_edge = self.initialize_residual()

        # cache Jacobians for comps that return them from provideJ

        # Size our Jacobian
        num_in = 0
        for item in inputs:

            # For parameter groups, only size the first
            if not isinstance(item, basestring):
                item = item[0]

            try:
                i1, i2 = self.get_bounds(item)
                if isinstance(i1, list):
                    num_in += len(i1)
                else:
                    num_in += i2 - i1
            except KeyError:
                val = self.scope.get(item)
                num_in += flattened_size(item, val, self.scope)

        num_out = 0
        for item in outputs:
            try:
                i1, i2 = self.get_bounds(item)
                if isinstance(i1, list):
                    num_out += len(i1)
                else:
                    num_out += i2 - i1
            except KeyError:
                val = self.scope.get(item)
                num_out += flattened_size(item, val, self.scope)

        shape = (num_out, num_in)

        # Auto-determine which mode to use based on Jacobian shape.
        if mode == 'auto':
            # TODO - additional determination based on presence of
            # apply_derivT

            if num_in > num_out:
                mode = 'adjoint'
            else:
                mode = 'forward'

        if mode == 'adjoint':
            J = calc_gradient_adjoint(self, inputs, outputs, n_edge, shape)
        elif mode in ['forward', 'fd']:
            J = calc_gradient(self, inputs, outputs, n_edge, shape)
        else:
            msg = "In calc_gradient, mode must be 'forward', 'adjoint', " + \
                  "'auto', or 'fd', but a value of %s was given." % mode
            self.scope.raise_exception(msg, RuntimeError)

        # Finally, we need to untransform the jacobian if any parameters have
        # scalers.
        #print 'edges:', self._edges
        if not hasattr(self._parent, 'get_parameters'):
            return J

        params = self._parent.get_parameters()

        if len(params) == 0:
            return J

        i = 0
        for group in inputs:

            if isinstance(group, str):
                group = [group]

            name = group[0]
            if len(group) > 1:
                pname = tuple([from_PA_var(aname) for aname in group])
            else:
                pname = from_PA_var(name)

            try:
                i1, i2 = self.get_bounds(name)
            except KeyError:
                continue

            if isinstance(i1, list):
                width = len(i1)
            else:
                width = i2 - i1

            if pname in params:
                scaler = params[pname].scaler
                if scaler != 1.0:
                    J[:, i:i + width] = J[:, i:i + width] * scaler

            i = i + width
        #print J
        return J
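The 'auto' branch only compares the Jacobian's dimensions: adjoint mode when there are more inputs than outputs (a wide Jacobian), forward mode otherwise. In isolation:

def pick_mode(num_in, num_out):
    # Adjoint is cheaper when the Jacobian is wide, forward when it is tall.
    return 'adjoint' if num_in > num_out else 'forward'

assert pick_mode(num_in=100, num_out=3) == 'adjoint'
assert pick_mode(num_in=2, num_out=50) == 'forward'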
Example #11
    def initialize_residual(self):
        """Creates the array that stores the residual. Also returns the
        number of edges.
        """
        dgraph = self.derivative_graph()
        if 'mapped_inputs' in dgraph.graph:
            inputs = dgraph.graph['mapped_inputs']
        else:
            inputs = dgraph.graph['inputs']

        basevars = set()
        edges = self.edge_list()
        implicit_edges = self.get_implicit_info()
        sortedkeys = sorted(implicit_edges)
        sortedkeys.extend(sorted(edges.keys()))

        nEdge = 0
        for src in sortedkeys:

            if src in implicit_edges:
                targets = implicit_edges[src]
                is_implicit = True
            else:
                targets = edges[src]
                is_implicit = False

            if isinstance(targets, str):
                targets = [targets]

            # Implicit source edges are tuples.
            if is_implicit == True:
                impli_edge = nEdge
                for resid in src:
                    unmap_src = from_PA_var(resid)

                    val = self.scope.get(unmap_src)
                    width = flattened_size(unmap_src, val, self.scope)

                    if isinstance(val, ndarray):
                        shape = val.shape
                    else:
                        shape = 1

                    bound = (impli_edge, impli_edge + width)
                    self.set_bounds(resid, bound)
                    basevars.add(resid)
                    impli_edge += width

            # Regular components
            else:

                # Only need to grab the source (or first target for param) to
                # figure out the size for the residual vector
                measure_src = src
                if '@in' in src:
                    idx = int(src[3:].split('[')[0])
                    inp = inputs[idx]
                    if not isinstance(inp, basestring):
                        inp = inp[0]
                    if inp in dgraph:
                        measure_src = inp
                    else:
                        measure_src = targets[0]
                elif src == '@fake':
                    for t in targets:
                        if not t.startswith('@'):
                            measure_src = t
                            break
                    else:
                        raise RuntimeError("malformed graph!")

                # Find our width, etc.
                unmap_src = from_PA_var(measure_src)
                val = self.scope.get(unmap_src)
                width = flattened_size(unmap_src, val, self.scope)
                if isinstance(val, ndarray):
                    shape = val.shape
                else:
                    shape = 1

                # Special poke for boundary node
                if is_boundary_node(dgraph, measure_src) or \
                   is_boundary_node(dgraph, dgraph.base_var(measure_src)):
                    bound = (nEdge, nEdge + width)
                    self.set_bounds(measure_src, bound)

                src_noidx = src.split('[', 1)[0]

                # Poke our source data

                # Array slice of src that is already allocated
                if '[' in src and src_noidx in basevars:
                    _, _, idx = src.partition('[')
                    basebound = self.get_bounds(src_noidx)
                    if not '@in' in src_noidx:
                        unmap_src = from_PA_var(src_noidx)
                        val = self.scope.get(unmap_src)
                        shape = val.shape
                    offset = basebound[0]
                    istring, ix = flatten_slice(idx,
                                                shape,
                                                offset=offset,
                                                name='ix')
                    bound = (istring, ix)
                    # Already allocated
                    width = 0

                # Input-input connection to implicit state
                elif src_noidx in basevars:
                    bound = self.get_bounds(src_noidx)
                    width = 0

                # Normal src
                else:
                    bound = (nEdge, nEdge + width)

                self.set_bounds(src, bound)
                basevars.add(src)

            # Poke our target data
            impli_edge = nEdge
            for target in targets:

                # Handle States in implicit comps
                if is_implicit == True:

                    if isinstance(target, str):
                        target = [target]

                    unmap_targ = from_PA_var(target[0])
                    val = self.scope.get(unmap_targ)
                    imp_width = flattened_size(unmap_targ, val, self.scope)
                    if isinstance(val, ndarray):
                        shape = val.shape
                    else:
                        shape = 1

                    for itarget in target:
                        bound = (impli_edge, impli_edge + imp_width)
                        self.set_bounds(itarget, bound)
                        basevars.add(itarget)

                    impli_edge += imp_width
                    width = impli_edge - nEdge

                elif not target.startswith('@'):
                    self.set_bounds(target, bound)

            #print input_src, src, target, bound,
            nEdge += width
            impli_edge = nEdge

        # Initialize the residual vector on the first time through, and also
        # if for some reason the number of edges has changed.
        if self.res is None or nEdge != self.res.shape[0]:
            self.res = zeros((nEdge, 1))

        return nEdge
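At its core, initialize_residual walks the sorted edge sources once, hands each variable it has not seen before a contiguous (start, end) slice of the residual vector, and advances the edge counter by that variable's flattened width. A stripped-down sketch of that allocation, ignoring the implicit, indexed-slice, and boundary special cases; names are made up:

import numpy as np

def allocate_bounds(widths):
    # Assign a contiguous (start, end) slice per variable in a flat residual.
    bounds, n_edge = {}, 0
    for name, width in widths.items():
        if name in bounds:        # already allocated: reuse, add no new edges
            continue
        bounds[name] = (n_edge, n_edge + width)
        n_edge += width
    return bounds, np.zeros((n_edge, 1))

bounds, res = allocate_bounds({'comp1.y': 3, 'comp2.z': 1})
# bounds == {'comp1.y': (0, 3), 'comp2.z': (3, 4)}; res.shape == (4, 1)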
Example #12
    def calc_gradient(self, inputs=None, outputs=None, upscope=False, mode='auto'):
        """Returns the gradient of the passed outputs with respect to
        all passed inputs.

        inputs: list of strings or tuples of strings
            List of input variables that we are taking derivatives with respect
            to. They must be within this workflow's scope. If no inputs are
            given, the parent driver's parameters are used. A tuple can be used
            to link inputs together.

        outputs: list of strings
            List of output variables that we are taking derivatives of.
            They must be within this workflow's scope. If no outputs are
            given, the parent driver's objectives and constraints are used.

        upscope: boolean
            This is set to True when our workflow is part of a subassembly that
            lies in a workflow that needs a gradient with respect to variables
            outside of this workflow, so that the caches can be reset.

        mode: string
            Set to 'forward' for forward mode, 'adjoint' for adjoint mode,
            'fd' for full-model finite difference (with fake finite
            difference disabled), or 'auto' to let OpenMDAO determine the
            correct mode.
        """

        self._J_cache = {}

        # User may request full-model finite difference.
        if self._parent.gradient_options.force_fd == True:
            mode = 'fd'

        # This function can be called from a parent driver's workflow for
        # assembly recursion. We have to clear our cache if that happens.
        # We also have to clear it next time we arrive back in our workflow.
        if upscope or self._upscoped:
            self._derivative_graph = None
            self._edges = None
            self._comp_edges = None

            self._upscoped = upscope

        dgraph = self.derivative_graph(inputs, outputs, fd=(mode == 'fd'))

        if 'mapped_inputs' in dgraph.graph:
            inputs = dgraph.graph['mapped_inputs']
            outputs = dgraph.graph['mapped_outputs']
        else:
            inputs = dgraph.graph['inputs']
            outputs = dgraph.graph['outputs']

        n_edge = self.initialize_residual()

        # cache Jacobians for comps that return them from provideJ


        # Size our Jacobian
        num_in = 0
        for item in inputs:

            # For parameter groups, only size the first
            if not isinstance(item, basestring):
                item = item[0]

            i1, i2 = self.get_bounds(item)
            if isinstance(i1, list):
                num_in += len(i1)
            else:
                num_in += i2-i1

        num_out = 0
        for item in outputs:
            i1, i2 = self.get_bounds(item)
            if isinstance(i1, list):
                num_out += len(i1)
            else:
                num_out += i2-i1

        shape = (num_out, num_in)

        # Auto-determine which mode to use based on Jacobian shape.
        if mode == 'auto':
            # TODO - additional determination based on presence of
            # apply_derivT

            if num_in > num_out:
                mode = 'adjoint'
            else:
                mode = 'forward'

        if mode == 'adjoint':
            J = calc_gradient_adjoint(self, inputs, outputs, n_edge, shape)
        elif mode in ['forward', 'fd']:
            J = calc_gradient(self, inputs, outputs, n_edge, shape)
        else:
            msg = "In calc_gradient, mode must be 'forward', 'adjoint', " + \
                  "'auto', or 'fd', but a value of %s was given." % mode
            self.scope.raise_exception(msg, RuntimeError)

        # Finally, we need to untransform the jacobian if any parameters have
        # scalers.

        if not hasattr(self._parent, 'get_parameters'):
            return J

        params = self._parent.get_parameters()

        if len(params) == 0:
            return J

        i = 0
        for group in inputs:

            if isinstance(group, str):
                group = [group]

            name = group[0]
            if len(group) > 1:
                pname = tuple([from_PA_var(aname) for aname in group])
            else:
                pname = from_PA_var(name)

            i1, i2 = self.get_bounds(name)

            if isinstance(i1, list):
                width = len(i1)
            else:
                width = i2-i1

            if pname in params:
                scaler = params[pname].scaler
                if scaler != 1.0:
                    J[:, i:i+width] = J[:, i:i+width]*scaler

            i = i + width
        #print J
        return J
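The tail of calc_gradient undoes parameter scaling one column block at a time: when a parameter (or parameter group) has a scaler other than 1.0, the matching columns of J are multiplied by it. In isolation, with hypothetical names and data:

import numpy as np

def unscale_jacobian(J, widths, scalers):
    # Multiply each parameter's column block of J by that parameter's scaler.
    i = 0
    for name, width in widths:
        s = scalers.get(name, 1.0)
        if s != 1.0:
            J[:, i:i + width] *= s
        i += width
    return J

J = np.ones((2, 3))
print(unscale_jacobian(J, [('p1', 1), ('p2', 2)], {'p2': 0.5}))
# [[1.  0.5 0.5]
#  [1.  0.5 0.5]]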
Example #13
    def initialize_residual(self):
        """Creates the array that stores the residual. Also returns the
        number of edges.
        """
        dgraph = self.derivative_graph()
        if 'mapped_inputs' in dgraph.graph:
            inputs = dgraph.graph['mapped_inputs']
        else:
            inputs = dgraph.graph['inputs']

        basevars = set()
        edges = self.edge_list()
        implicit_edges = self.get_implicit_info()
        sortedkeys = sorted(implicit_edges)
        sortedkeys.extend(sorted(edges.keys()))

        nEdge = 0
        for src in sortedkeys:

            if src in implicit_edges:
                targets = implicit_edges[src]
                is_implicit = True
            else:
                targets = edges[src]
                is_implicit = False

            if isinstance(targets, str):
                targets = [targets]

            # Implicit source edges are tuples.
            if is_implicit == True:
                impli_edge = nEdge
                for resid in src:
                    unmap_src = from_PA_var(resid)

                    val = self.scope.get(unmap_src)
                    width = flattened_size(unmap_src, val, self.scope)

                    if isinstance(val, ndarray):
                        shape = val.shape
                    else:
                        shape = 1

                    bound = (impli_edge, impli_edge+width)
                    self.set_bounds(resid, bound)
                    basevars.add(resid)
                    impli_edge += width

            # Regular components
            else:

                # Only need to grab the source (or first target for param) to
                # figure out the size for the residual vector
                measure_src = src
                if '@in' in src:
                    idx = int(src[3:].split('[')[0])
                    inp = inputs[idx]
                    if not isinstance(inp, basestring):
                        inp = inp[0]
                    if inp in dgraph:
                        measure_src = inp
                    else:
                        measure_src = targets[0]
                elif src == '@fake':
                    for t in targets:
                        if not t.startswith('@'):
                            measure_src = t
                            break
                    else:
                        raise RuntimeError("malformed graph!")

                # Find our width, etc.
                unmap_src = from_PA_var(measure_src)
                val = self.scope.get(unmap_src)
                width = flattened_size(unmap_src, val, self.scope)
                if isinstance(val, ndarray):
                    shape = val.shape
                else:
                    shape = 1

                # Special poke for boundary node
                if is_boundary_node(dgraph, measure_src) or \
                   is_boundary_node(dgraph, dgraph.base_var(measure_src)):
                    bound = (nEdge, nEdge+width)
                    self.set_bounds(measure_src, bound)

                src_noidx = src.split('[', 1)[0]

                # Poke our source data

                # Array slice of src that is already allocated
                if '[' in src and src_noidx in basevars:
                    _, _, idx = src.partition('[')
                    basebound = self.get_bounds(src_noidx)
                    if not '@in' in src_noidx:
                        unmap_src = from_PA_var(src_noidx)
                        val = self.scope.get(unmap_src)
                        shape = val.shape
                    offset = basebound[0]
                    istring, ix = flatten_slice(idx, shape, offset=offset,
                                                name='ix')
                    bound = (istring, ix)
                    # Already allocated
                    width = 0

                # Input-input connection to implicit state
                elif src_noidx in basevars:
                    bound = self.get_bounds(src_noidx)
                    width = 0

                # Normal src
                else:
                    bound = (nEdge, nEdge+width)

                self.set_bounds(src, bound)
                basevars.add(src)

            # Poke our target data
            impli_edge = nEdge
            for target in targets:

                # Handle States in implicit comps
                if is_implicit == True:

                    if isinstance(target, str):
                        target = [target]

                    unmap_targ = from_PA_var(target[0])
                    val = self.scope.get(unmap_targ)
                    imp_width = flattened_size(unmap_targ, val, self.scope)
                    if isinstance(val, ndarray):
                        shape = val.shape
                    else:
                        shape = 1

                    for itarget in target:
                        bound = (impli_edge, impli_edge+imp_width)
                        self.set_bounds(itarget, bound)
                        basevars.add(itarget)

                    impli_edge += imp_width
                    width = impli_edge - nEdge

                elif not target.startswith('@'):
                    self.set_bounds(target, bound)

            #print input_src, src, target, bound,
            nEdge += width
            impli_edge = nEdge

        # Initialize the residual vector on the first time through, and also
        # if for some reason the number of edges has changed.
        if self.res is None or nEdge != self.res.shape[0]:
            self.res = zeros((nEdge, 1))

        return nEdge
Example #14
    def get_dependents(self, fixed_point=False):
        """Returns a list of current values of the dependents. This includes
        both constraints and severed sources.

        fixed_point: bool
            Set to True if we are doing fixed-point iteration instead of a more
            general solve. In such a case, we need to swap the order of the
            constraints to match the parameter order. We also may need to swap
            signs on the constraints.
        """

        parent = self.parent
        deps = array(parent.eval_eq_constraints(self.scope))
        # Reorder for fixed point
        if fixed_point is True:
            eqcons = parent.get_eq_constraints()

            rhs = {}
            lhs = {}
            i = 0
            for value in eqcons.itervalues():
                #make a mapping of position of each constraint
                rhs[value.rhs.text] = (i, value.size)
                lhs[value.lhs.text] = (i, value.size)
                i += value.size

            new_dep_index = empty(len(deps), dtype="int")
            new_dep_sign = empty(len(deps), dtype="int")
            k = 0
            for params in parent.list_param_group_targets():
                #for each param, grab the right map value and set the sign convention
                try:
                    j, size = rhs[params[0]]
                    new_dep_index[k:k +
                                  size] = j + arange(0, size, dtype="int")
                    new_dep_sign[k:k + size] = ones((size, ))
                    k += size
                except KeyError:  #wasn't in the rhs dict, try the lhs
                    try:
                        j, size = lhs[params[0]]
                        new_dep_index[k:k +
                                      size] = j + arange(0, size, dtype="int")
                        new_dep_sign[k:k + size] = -1 * ones(size)
                        k += size
                    except KeyError:
                        pass  #TODO: need to throw an error here. Why was there a param that didn't show up in the constraint

            #reset the deps array to the new order and sign
            deps = deps[new_dep_index] * new_dep_sign

        sev_deps = []
        for src, target in self._severed_edges:

            if not isinstance(target, str):
                target = target[0]

            target = from_PA_var(target)
            src = from_PA_var(src)
            src_val = self.scope.get(src)
            targ_val = self.scope.get(target)
            res = flattened_value(src, src_val) - flattened_value(
                target, targ_val)

            sev_deps.extend(res)

        return hstack((deps, sev_deps))
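Example #14 replaces the nested loops of Examples #2 and #3 with an index map: each constraint's rhs and lhs expressions are recorded with their offset and size, then a permutation index and a sign vector are built in parameter order and applied in one fancy-indexing step. A self-contained sketch of that idea; the constraint tuples and data are made up:

import numpy as np

def reorder(deps, cons, params):
    # cons: list of (lhs, rhs, size) in constraint order; params: names in
    # parameter order. Returns deps permuted to parameter order, sign-flipped
    # for parameters that appear on a constraint's lhs.
    rhs, lhs, i = {}, {}, 0
    for l, r, size in cons:
        rhs[r] = (i, size)
        lhs[l] = (i, size)
        i += size

    index = np.empty(len(deps), dtype=int)
    sign = np.empty(len(deps), dtype=int)
    k = 0
    for p in params:
        j, size = rhs[p] if p in rhs else lhs[p]
        index[k:k + size] = j + np.arange(size)
        sign[k:k + size] = 1 if p in rhs else -1
        k += size
    return deps[index] * sign

deps = np.array([1.0, 2.0, 3.0])
cons = [('a.x', 'b.y', 1), ('c.z', 'd.w', 2)]
print(reorder(deps, cons, ['c.z', 'b.y']))   # [-2. -3.  1.]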
Example #15
    def get_dependents(self, fixed_point=False):
        """Returns a list of current values of the dependents. This includes
        both constraints and severed sources.

        fixed_point: bool
            Set to True if we are doing fixed-point iteration instead of a more
            general solve. In such a case, we need to swap the order of the
            constraints to match the parameter order. We also may need to swap
            signs on the constraints.
        """

        parent = self.parent
        deps = array(parent.eval_eq_constraints(self.scope))
        # Reorder for fixed point
        if fixed_point is True:
            eqcons = parent.get_eq_constraints()

            rhs = {}
            lhs = {}
            i = 0
            for value in eqcons.itervalues():
                #make a mapping of position of each constraint
                rhs[value.rhs.text] = (i, value.size)
                lhs[value.lhs.text] = (i, value.size)
                i += value.size

            new_dep_index = empty(len(deps), dtype="int")
            new_dep_sign = empty(len(deps), dtype="int")
            k = 0
            for params in parent.list_param_group_targets():
                #for each param, grab the right map value and set the sign convention
                try:
                    j, size = rhs[params[0]]
                    new_dep_index[k:k+size] = j+arange(0, size, dtype="int")
                    new_dep_sign[k:k+size] = ones((size,))
                    k += size
                except KeyError: #wasn't in the rhs dict, try the lhs
                    try:
                        j, size = lhs[params[0]]
                        new_dep_index[k:k+size] = j+arange(0, size, dtype="int")
                        new_dep_sign[k:k+size] = -1*ones(size)
                        k += size
                    except KeyError:
                        pass #TODO: need to throw an error here. Why was there a param that didn't show up in the constraint

            #reset the deps array to the new order and sign
            deps = deps[new_dep_index]*new_dep_sign


        sev_deps = []
        for src, target in self._severed_edges:

            if not isinstance(target, str):
                target = target[0]

            target = from_PA_var(target)
            src = from_PA_var(src)
            src_val = self.scope.get(src)
            targ_val = self.scope.get(target)
            res = flattened_value(src, src_val) - flattened_value(target, targ_val)

            sev_deps.extend(res)

        return hstack((deps, sev_deps))