Example #1
    def component_tensor(self, o, Ap, ii):
        if isinstance(Ap, Zero):
            op = self.independent_operator(o)
        else:
            Ap, jj = as_scalar(Ap)
            op = as_tensor(Ap, ii.indices() + jj)
        return op
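The handler above differentiates the scalar body of a component tensor and rebuilds the tensor with the original component indices ii followed by the new free indices jj introduced by the derivative. A minimal numpy sketch (illustration only, not UFL; all names below are made up) of why the derivative axes simply trail the component axes:

import numpy as np

x = np.array([1.0, 2.0, 3.0])
n = len(x)

# A[i, j] = x[i] * x[j], assembled componentwise
A = np.array([[x[i] * x[j] for j in range(n)] for i in range(n)])

# dA[i, j, k] = d(x[i] * x[j]) / dx[k], assembled componentwise, with the
# derivative index k trailing the component indices (i, j)
delta = np.eye(n)
dA = np.array([[[delta[i, k] * x[j] + x[i] * delta[j, k]
                 for k in range(n)]
                for j in range(n)]
               for i in range(n)])

# Cross-check against a central finite difference of the assembled tensor
h = 1e-6
fd = np.empty((n, n, n))
for k in range(n):
    e = np.zeros(n)
    e[k] = h
    fd[:, :, k] = (np.outer(x + e, x + e) - np.outer(x - e, x - e)) / (2 * h)

assert np.allclose(dA, fd, atol=1e-5)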
    def component_tensor(self, o):
        A, ii = o.operands()
        A, Ap = self.visit(A)
        o = self.reuse_if_possible(o, A, ii)

        if isinstance(Ap, Zero):
            op = self._make_zero_diff(o)
        else:
            Ap, jj = as_scalar(Ap)
            op = ComponentTensor(Ap, ii._indices + jj)
        return (o, op)
    def division(self, o, a, b):
        f, fp = a
        g, gp = b
        o = self.reuse_if_possible(o, f, g)

        ufl_assert(is_ufl_scalar(f), "Not expecting nonscalar numerator")
        ufl_assert(is_true_ufl_scalar(g), "Not expecting nonscalar denominator")

        # do_df = 1/g
        # do_dg = -o/g
        # op = do_df*fp + do_dg*gp
        # op = (fp - o*gp) / g

        # Get o and gp as scalars, multiply, then wrap as a tensor again
        so, oi = as_scalar(o)
        sgp, gi = as_scalar(gp)
        o_gp = so*sgp
        if oi or gi:
            o_gp = as_tensor(o_gp, oi + gi)
        op = (fp - o_gp) / g

        return (o, op)
Example #6
    def division(self, o, fp, gp):
        f, g = o.ufl_operands

        if not is_ufl_scalar(f):
            error("Not expecting nonscalar nominator")
        if not is_true_ufl_scalar(g):
            error("Not expecting nonscalar denominator")

        # do_df = 1/g
        # do_dg = -o/g
        # op = do_df*fp + do_dg*gp
        # op = (fp - o*gp) / g

        # Get o and gp as scalars, multiply, then wrap as a tensor
        # again
        so, oi = as_scalar(o)
        sgp, gi = as_scalar(gp)
        o_gp = so * sgp
        if oi or gi:
            o_gp = as_tensor(o_gp, oi + gi)
        op = (fp - o_gp) / g

        return op
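Both division handlers implement the quotient rule in the rearranged form op = (fp - o*gp) / g with o = f/g, which reuses the already-built quotient instead of forming 1/g**2. A small plain-Python check (illustration only, not UFL) that this form agrees with the textbook rule and with a finite difference:

def quotient_rule(f, fp, g, gp):
    o = f / g
    return (fp - o * gp) / g

# Example values: f(t) = t**2, g(t) = 3*t + 1, evaluated at t = 2
t = 2.0
f, fp = t**2, 2*t
g, gp = 3*t + 1, 3.0

lhs = quotient_rule(f, fp, g, gp)
rhs = (fp*g - f*gp) / g**2          # textbook quotient rule
assert abs(lhs - rhs) < 1e-12

# Central finite difference of t**2 / (3*t + 1) at t = 2
h = 1e-6
fd = ((t + h)**2 / (3*(t + h) + 1) - (t - h)**2 / (3*(t - h) + 1)) / (2*h)
assert abs(lhs - fd) < 1e-6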
    def coefficient(self, o):
        # Define dw/dw := d/ds [w + s v] = v

        debug("In CoefficientAD.coefficient:")
        debug("o = %s" % o)
        debug("self._w = %s" % self._w)
        debug("self._v = %s" % self._v)

        # Find o among w
        for (w, v) in izip(self._w, self._v):
            if o == w:
                return (w, v)

        # If o is not among coefficient derivatives, return do/dw=0
        oprimesum = Zero(o.shape())
        oprimes = self._cd._data.get(o)
        if oprimes is None:
            if self._cd._data:
                # TODO: Make it possible to silence this message in particular?
                #       It may be good to have for debugging...
                warning("Assuming d{%s}/d{%s} = 0." % (o, self._w))
        else:
            # Make sure we have a tuple to match the self._v tuple
            if not isinstance(oprimes, tuple):
                oprimes = (oprimes,)
            ufl_assert(len(oprimes) == len(self._v),
                       "Got a tuple of arguments, "
                       "expecting a matching tuple of coefficient derivatives.")

            # Compute do/dw_j = do/dw_h : v.
            # Since we may actually have a tuple of oprimes and vs in a
            # 'mixed' space, sum over them all to get the complete inner
            # product. Using indices to define a non-compound inner product.
            for (oprime, v) in izip(oprimes, self._v):
                so, oi = as_scalar(oprime)
                rv = len(v.shape())
                oi1 = oi[:-rv]
                oi2 = oi[-rv:]
                prod = so*v[oi2]
                if oi1:
                    oprimesum += as_tensor(prod, oi1)
                else:
                    oprimesum += prod

        # Example:
        # (f : g) -> (dfdu : v) : g + ditto
        # shape(f) == shape(g) == shape(dfdu : v)
        # shape(dfdu) == shape(f) + shape(v)

        return (o, oprimesum)
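The coefficient handler is built on the definition in its first comment: the derivative with respect to a coefficient w in direction v is the Gateaux derivative d/ds F(w + s v) at s = 0, so dw/dw in direction v is just v. A small numerical sketch (plain numpy, not UFL) of that definition:

import numpy as np

def gateaux(F, w, v, h=1e-6):
    # Central-difference approximation of d/ds F(w + s*v) at s = 0
    return (F(w + h*v) - F(w - h*v)) / (2*h)

w = np.array([1.0, 2.0, 3.0])
v = np.array([0.5, -1.0, 2.0])

# dw/dw in direction v is v itself
assert np.allclose(gateaux(lambda u: u, w, v), v)

# For F(w) = w . w the derivative in direction v is 2 w . v
assert np.isclose(gateaux(lambda u: u @ u, w, v), 2 * (w @ v), atol=1e-5)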
Example #11
    def coefficient(self, o):
        # Define dw/dw := d/ds [w + s v] = v

        # Return corresponding argument if we can find o among w
        do = self._w2v.get(o)
        if do is not None:
            return do

        # Look for o among coefficient derivatives
        dos = self._cd.get(o)
        if dos is None:
            # If o is not among coefficient derivatives, return
            # do/dw=0
            do = Zero(o.ufl_shape)
            return do
        else:
            # Compute do/dw_j = do/dw_h : v.
            # Since we may actually have a tuple of oprimes and vs in a
            # 'mixed' space, sum over them all to get the complete inner
            # product. Using indices to define a non-compound inner product.

            # Example:
            # (f:g) -> (dfdu:v):g + f:(dgdu:v)
            # shape(dfdu) == shape(f) + shape(v)
            # shape(f) == shape(g) == shape(dfdu : v)

            # Make sure we have a tuple to match the self._v tuple
            if not isinstance(dos, tuple):
                dos = (dos, )
            if len(dos) != len(self._v):
                error(
                    "Got a tuple of arguments, expecting a matching tuple of coefficient derivatives."
                )
            dosum = Zero(o.ufl_shape)
            for do, v in zip(dos, self._v):
                so, oi = as_scalar(do)
                rv = len(v.ufl_shape)
                oi1 = oi[:-rv]
                oi2 = oi[-rv:]
                prod = so * v[oi2]
                if oi1:
                    dosum += as_tensor(prod, oi1)
                else:
                    dosum += prod
            return dosum
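In both coefficient handlers, the as_scalar index split oi[:-rv] / oi[-rv:] implements the contraction do/dw : v over the trailing shape(v) axes, since shape(do/dw) == shape(o) + shape(v). A numpy sketch (illustration only, not UFL) of the same bookkeeping:

import numpy as np

shape_o, shape_v = (2, 3), (4,)
dodw = np.random.rand(*(shape_o + shape_v))  # plays the role of do/dw
v = np.random.rand(*shape_v)                 # plays the role of the variation

# "do/dw : v": contract the trailing len(shape_v) axes (the oi[-rv:] indices)
dosum = np.tensordot(dodw, v, axes=len(shape_v))
assert dosum.shape == shape_o                # only the oi[:-rv] indices remain
assert np.allclose(dosum, np.einsum('ijk,k->ij', dodw, v))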
    def product(self, o, *ops):
        # Start with a zero with the right shape and indices
        fp = self._make_zero_diff(o)
        # Get operands and their derivatives
        ops2, dops2 = unzip(ops)
        o = self.reuse_if_possible(o, *ops2)
        for i in xrange(len(ops)):
            # Get scalar representation of differentiated value of operand i
            dop = dops2[i]
            dop, ii = as_scalar(dop)
            # Replace operand i with its differentiated value in product
            fpoperands = ops2[:i] + [dop] + ops2[i+1:]
            p = Product(*fpoperands)
            # Wrap product in tensor again
            if ii:
                p = as_tensor(p, ii)
            # Accumulate terms
            fp += p
        return (o, fp)
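The product handler accumulates the standard product rule: the derivative is the sum over operands of the product with that single operand replaced by its derivative, rewrapped as a tensor if the differentiated operand carried free indices. A plain-Python sketch (illustration only, not UFL) of the scalar case, checked against a finite difference:

import math

def product_rule(ops, dops):
    fp = 0.0
    for i in range(len(ops)):
        # Replace operand i by its derivative and multiply everything
        fp += math.prod(ops[:i] + [dops[i]] + ops[i+1:])
    return fp

# Example: d/dt [ t**2 * sin(t) * exp(t) ] at t = 1.3
t = 1.3
ops = [t**2, math.sin(t), math.exp(t)]
dops = [2*t, math.cos(t), math.exp(t)]
exact = product_rule(ops, dops)

def f(s):
    return s**2 * math.sin(s) * math.exp(s)

h = 1e-6
fd = (f(t + h) - f(t - h)) / (2*h)
assert abs(exact - fd) < 1e-5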
Example #15
    def grad(self, g):
        # If we hit this type, it has already been propagated to a
        # coefficient (or grad of a coefficient), so we need to take
        # the gradient of the variation or return zero.
        # FIXME: Assert this!
        # Complications occur when dealing with derivatives
        # w.r.t. single components...

        # Figure out how many gradients are around the inner terminal
        ngrads = 0
        o = g
        while isinstance(o, Grad):
            o, = o.ufl_operands
            ngrads += 1
        if not isinstance(o, FormArgument):
            error("Expecting gradient of a FormArgument, not %s" %
                  ufl_err_str(o))

        def apply_grads(f):
            for i in range(ngrads):
                f = Grad(f)
            return f

        # Find o among all w without any indexing, which makes this
        # easy
        for (w, v) in zip(self._w, self._v):
            if o == w and isinstance(v, FormArgument):
                # Case: d/dt [w + t v]
                return apply_grads(v)

        # If o is not among coefficient derivatives, return do/dw=0
        gprimesum = Zero(g.ufl_shape)

        def analyse_variation_argument(v):
            # Analyse variation argument
            if isinstance(v, FormArgument):
                # Case: d/dt [w[...] + t v]
                vval, vcomp = v, ()
            elif isinstance(v, Indexed):
                # Case: d/dt [w + t v[...]]
                # Case: d/dt [w[...] + t v[...]]
                vval, vcomp = v.ufl_operands
                vcomp = tuple(vcomp)
            else:
                error("Expecting argument or component of argument.")
            if not all(isinstance(k, FixedIndex) for k in vcomp):
                error("Expecting only fixed indices in variation.")
            return vval, vcomp

        def compute_gprimeterm(ngrads, vval, vcomp, wshape, wcomp):
            # Apply gradients directly to argument vval, and get the
            # right indexed scalar component(s)
            kk = indices(ngrads)
            Dvkk = apply_grads(vval)[vcomp + kk]
            # Place scalar component(s) Dvkk into the right tensor
            # positions
            if wshape:
                Ejj, jj = unit_indexed_tensor(wshape, wcomp)
            else:
                Ejj, jj = 1, ()
            gprimeterm = as_tensor(Ejj * Dvkk, jj + kk)
            return gprimeterm

        # Accumulate contributions from variations in different
        # components
        for (w, v) in zip(self._w, self._v):

            # Analyse differentiation variable coefficient
            if isinstance(w, FormArgument):
                if not w == o:
                    continue
                wshape = w.ufl_shape

                if isinstance(v, FormArgument):
                    # Case: d/dt [w + t v]
                    return apply_grads(v)

                elif isinstance(v, ListTensor):
                    # Case: d/dt [w + t <...,v,...>]
                    for wcomp, vsub in unwrap_list_tensor(v):
                        if not isinstance(vsub, Zero):
                            vval, vcomp = analyse_variation_argument(vsub)
                            gprimesum = gprimesum + compute_gprimeterm(
                                ngrads, vval, vcomp, wshape, wcomp)

                else:
                    if wshape != ():
                        error("Expecting scalar coefficient in this branch.")
                    # Case: d/dt [w + t v[...]]
                    wval, wcomp = w, ()

                    vval, vcomp = analyse_variation_argument(v)
                    gprimesum = gprimesum + compute_gprimeterm(
                        ngrads, vval, vcomp, wshape, wcomp)

            elif isinstance(w, Indexed):  # This path is tested in unit tests, but not actually used?
                # Case: d/dt [w[...] + t v[...]]
                # Case: d/dt [w[...] + t v]
                wval, wcomp = w.ufl_operands
                if not wval == o:
                    continue
                assert isinstance(wval, FormArgument)
                if not all(isinstance(k, FixedIndex) for k in wcomp):
                    error("Expecting only fixed indices in differentiation variable.")
                wshape = wval.ufl_shape

                vval, vcomp = analyse_variation_argument(v)
                gprimesum = gprimesum + compute_gprimeterm(
                    ngrads, vval, vcomp, wshape, wcomp)

            else:
                error("Expecting coefficient or component of coefficient.")

        # FIXME: Handle other coefficient derivatives: oprimes =
        # self._cd.get(o)

        if 0:
            oprimes = self._cd.get(o)
            if oprimes is None:
                if self._cd:
                    # TODO: Make it possible to silence this message
                    #       in particular?  It may be good to have for
                    #       debugging...
                    warning("Assuming d{%s}/d{%s} = 0." % (o, self._w))
            else:
                # Make sure we have a tuple to match the self._v tuple
                if not isinstance(oprimes, tuple):
                    oprimes = (oprimes,)
                if len(oprimes) != len(self._v):
                    error("Got a tuple of arguments, expecting a"
                          " matching tuple of coefficient derivatives.")

                # Compute dg/dw_j = dg/dw_h : v.
                # Since we may actually have a tuple of oprimes and vs
                # in a 'mixed' space, sum over them all to get the
                # complete inner product. Using indices to define a
                # non-compound inner product.
                for (oprime, v) in zip(oprimes, self._v):
                    error("FIXME: Figure out how to do this with ngrads")
                    so, oi = as_scalar(oprime)
                    rv = len(v.ufl_shape)
                    oi1 = oi[:-rv]
                    oi2 = oi[-rv:]
                    prod = so * v[oi2]
                    if oi1:
                        gprimesum += as_tensor(prod, oi1)
                    else:
                        gprimesum += prod

        return gprimesum
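For the single-component branch, compute_gprimeterm places the gradient of the scalar variation into the perturbed component's slot of the tensor-valued coefficient, using a unit tensor from unit_indexed_tensor. A numpy sketch (illustration only, not UFL; gradv is a stand-in array for the spatial gradient of v) of that placement for ngrads = 1:

import numpy as np

wshape = (2, 3)                 # shape of the coefficient w
wcomp = (1, 2)                  # the single component being varied
gradv = np.array([0.7, -0.2])   # stand-in for the spatial gradient of v

# unit_indexed_tensor analogue: 1 at wcomp, 0 elsewhere
E = np.zeros(wshape)
E[wcomp] = 1.0

# d(grad w)/dt has shape wshape + gradient shape, nonzero only in the
# wcomp slot, where it equals grad(v)
gprime = np.multiply.outer(E, gradv)
assert gprime.shape == wshape + gradv.shape
assert np.allclose(gprime[wcomp], gradv)
assert np.count_nonzero(gprime) == gradv.size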