Code example #1
 def compute_norm(self, field=None, **defaults_config):
     '''
     Proves ‖a ⊗ b ⊗ ... ⊗ y ⊗ z‖ = ‖a‖·‖b‖·...·‖y‖·‖z‖.
     '''
     from proveit.logic import EvaluationError
     from . import norm_of_tensor_prod, norm_preserving_tensor_prod
     _a = self.operands
     _i = _a.num_elements()
     _K = VecSpaces.get_field(field)
     vec_spaces = VecSpaces.known_vec_spaces(self.operands, field=_K)
     
     # See if all of the operand normalizations evaluate to one.
     all_norm_one = True
     try:
         for operand in self.operands:
             if isinstance(operand, ExprRange):
                 with defaults.temporary() as tmp_defaults:
                     tmp_defaults.assumptions = defaults.assumptions + (
                             operand.parameter_condition(),)
                     body_norm = operand.body.compute_norm()
                     if body_norm.rhs.evaluated() != one:
                         all_norm_one = False
                         break
             else:
                 if operand.compute_norm().rhs.evaluated() != one:
                     all_norm_one = False
                     break
     except (EvaluationError, NotImplementedError):
         all_norm_one = False
     if all_norm_one:
         thm = norm_preserving_tensor_prod
     else:
         thm = norm_of_tensor_prod
     return thm.instantiate({K: _K, i: _i, V: vec_spaces, a: _a})            
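A hypothetical usage sketch for the method above (the variable names, vector-space memberships, and expected judgment are illustrative assumptions, not taken from the source):

# Minimal sketch, assuming a and b are already known (via the assumptions
# below) to be vectors in vector spaces over a common field K; whether the
# proof succeeds depends on what Prove-It can derive about their norms.
from proveit import defaults, a, b, U, V, K
from proveit.logic import InSet
from proveit.linear_algebra import VecSpaces, TensorProd

defaults.assumptions = [InSet(a, U), InSet(b, V),
                        InSet(U, VecSpaces(K)), InSet(V, VecSpaces(K))]
# Expected (if the prerequisites are provable): |- ‖a ⊗ b‖ = ‖a‖·‖b‖ via
# norm_of_tensor_prod, or |- ‖a ⊗ b‖ = 1 via norm_preserving_tensor_prod
# when every factor's norm evaluates to one.
norm_eq = TensorProd(a, b).compute_norm(field=K)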
Code example #2
File: scalar_mult.py  Project: PyProveIt/Prove-It
 def double_scaling_reduction(self, **defaults_config):
     from . import doubly_scaled_as_singly_scaled
     if not isinstance(self.scaled, ScalarMult):
         raise ValueError("'double_scaling_reduction' is only applicable "
                          "for a doubly nested ScalarMult")
     # Reduce doubly-nested ScalarMult
     _x = self.scaled.scaled
     # _V = VecSpaces.known_vec_space(_x)
     # The following is a little clunky, but we are trying to avoid
     # using a default field=Real when we're actually dealing with
     # complex scalars somewhere in the vector.
     from proveit import free_vars
     if any([InSet(elem, Complex).proven() for elem in free_vars(self)]):
         _V = VecSpaces.known_vec_space(self, field=Complex)
     else:
         _V = VecSpaces.known_vec_space(self)
     _K = VecSpaces.known_field(_V)
     _alpha = self.scalar
     _beta = self.scaled.scalar
     return doubly_scaled_as_singly_scaled.instantiate({
         K: _K,
         V: _V,
         x: _x,
         alpha: _alpha,
         beta: _beta
     })
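A hypothetical call pattern for the method above (names, field choice, and assumptions are illustrative, not from the source):

# Minimal sketch, assuming x is a vector in a complex vector space V and
# alpha, beta are complex scalars.
from proveit import defaults, x, V, alpha, beta
from proveit.logic import InSet
from proveit.numbers import Complex
from proveit.linear_algebra import VecSpaces, ScalarMult

defaults.assumptions = [InSet(x, V), InSet(V, VecSpaces(Complex)),
                        InSet(alpha, Complex), InSet(beta, Complex)]
nested = ScalarMult(alpha, ScalarMult(beta, x))
# Expected (if the prerequisites are provable): |- alpha·(beta·x) = (alpha·beta)·x
reduction = nested.double_scaling_reduction()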
Code example #3
    def shallow_simplification(self, *, must_evaluate=False,
                               **defaults_config):
        '''
        Returns a proven simplification equation for this TensorProd
        expression assuming the operands have been simplified.
        
        Currently deals only with:
        (1) simplifying a TensorProd(x) (i.e. a TensorProd with a
            single operand x) to x itself. For example,
            TensorProd(x) = x.
        (2) Ungrouping nested tensor products.
        (3) Factoring out scalars.
        '''

        if self.operands.is_single():
            from . import unary_tensor_prod_def
            _V = VecSpaces.known_vec_space(self.operand)
            _K = VecSpaces.known_field(_V)
            return unary_tensor_prod_def.instantiate(
                {K:_K, V:_V, A:self.operands[0]}, preserve_all=True)

        # for convenience updating our equation:
        expr = self
        eq = TransRelUpdater(expr)
        
        if TensorProd._simplification_directives_.ungroup:
            # Ungroup the expression (disassociate nested tensor products).
            _n = 0
            length = expr.operands.num_entries() - 1
            # loop through all operands
            while _n < length:
                operand = expr.operands[_n]
                if isinstance(operand, TensorProd):
                    # if it is grouped, ungroup it
                    expr = eq.update(expr.disassociation(
                            _n, preserve_all=True))
                length = expr.operands.num_entries()
                _n += 1
        
        if TensorProd._simplification_directives_.factor_scalars:
            # Next, pull out scalar factors
            try:
                VecSpaces.known_vec_space(self)
            except ValueError:
                raise UnsatisfiedPrerequisites(
                        "No known vector space for %s"%self)
            for _k, operand in enumerate(expr.operands):
                if isinstance(operand, ScalarMult):
                    # Just pull out the first one we see and let
                    # recursive simplifications take care of any more.
                    # To make sure this happens we turn auto_simplify
                    # on, and those simplifications should all be fair
                    # game as part of shallow_simplification.
                    expr = eq.update(expr.scalar_factorization(
                        _k, auto_simplify=True))
                    break
        
        # Future processing possible here.
        return eq.relation
Code example #4
File: scalar_mult.py  Project: PyProveIt/Prove-It
 def compute_norm(self, **defaults_config):
     '''
     Proves ‖a v‖ = |a| ‖v‖.
     '''
     from proveit.linear_algebra.inner_products import scaled_norm
     vec_space = VecSpaces.known_vec_space(self.scaled)
     field = VecSpaces.known_field(vec_space)
     return scaled_norm.instantiate({
         K: field,
         H: vec_space,
         alpha: self.scalar,
         x: self.scaled
     })
Code example #5
    def disassociation(self, idx, *, field=None, 
                       **defaults_config):
        '''
        Given vector operands, or all CartExp operands, deduce that
        this expression is equal to a form in which operand at index
        idx is no longer grouped together.
        For example, calling
        (a ⊗ b ⊗ ... (l ⊗ ... ⊗ m) ... ⊗ y ⊗ z).disassociation(l-1)
        would return
        |- (a ⊗ b ⊗ ... (l ⊗ ... ⊗ m) ... ⊗ y ⊗ z) = 
            (a ⊗ b ⊗ ... ⊗ l ⊗ ... ⊗ m ⊗ ... ⊗ y ⊗ z)
        Or calling (R3 ⊗ (R3 ⊗ R3)).disassociation(1) would return
        |- (R3 ⊗ (R3 ⊗ R3)) = (R3 ⊗ R3 ⊗ R3) 
        
        For this to work in the vectors case, the vector operands must
        be known to be in vector spaces of a common field.  If the
        field is not specified, then VecSpaces.default_field is used.
        For this to work in the case of CartExp operands, all operands
        must be (recursively) CartExps and each must be known to be
        a vector space.
        '''
        # ORIGINAL BELOW before augmenting for CartExp cases
        # from . import tensor_prod_disassociation
        # _V = VecSpaces.known_vec_space(self, field=field)
        # _K = VecSpaces.known_field(_V)
        # eq = apply_disassociation_thm(
        #         self, idx, tensor_prod_disassociation,
        #         repl_map_extras={K:_K, V:_V}).derive_consequent()
        # return eq.with_wrapping_at()

        if not TensorProd.all_ops_are_cart_exp(self):
            from . import tensor_prod_disassociation
            _V = VecSpaces.known_vec_space(self, field=field)
            _K = VecSpaces.known_field(_V)
            eq = apply_disassociation_thm(
                    self, idx, tensor_prod_disassociation,
                    repl_map_extras={K:_K, V:_V}).derive_consequent()
            return eq.with_wrapping_at()
        else:
            from . import tensor_prod_vec_space_disassociation
            if field is None:
                _K = VecSpaces.known_field(self.operands[0])
            else:
                _K = field
            eq = apply_disassociation_thm(
                    self, idx, tensor_prod_vec_space_disassociation,
                    repl_map_extras={K:_K})
            return eq.with_wrapping_at()
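Restating the docstring's CartExp example as a hypothetical script (the construction of R3 and the import location of CartExp are assumptions on my part):

# Minimal sketch: ungroup a nested tensor product of Cartesian-power spaces.
from proveit.numbers import Real, three
from proveit.logic import CartExp   # assumed import path for CartExp
from proveit.linear_algebra import TensorProd

R3 = CartExp(Real, three)           # intended to play the role of R^3
nested = TensorProd(R3, TensorProd(R3, R3))
# Expected: |- (R3 ⊗ (R3 ⊗ R3)) = (R3 ⊗ R3 ⊗ R3)
ungrouped = nested.disassociation(1, field=Real)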
Code example #6
 def distribution(self, idx, *, field=None,
                  **defaults_config):
     '''
     Given a TensorProd operand at the (0-based) index location
     'idx' that is a vector sum or summation, prove the distribution
     over that TensorProd factor and return an equality between the
     original and the distributed form. For example, we could take the TensorProd
         tens_prod = TensorProd(a, b+c, d)
     and call tens_prod.distribution(1) to obtain:
         |- TensorProd(a, b+c, d) =
            TensorProd(a, b, d) + TensorProd(a, c, d)
     '''
     from . import (tensor_prod_distribution_over_add,
                    tensor_prod_distribution_over_summation)
     _V = VecSpaces.known_vec_space(self, field=field)
     _K = VecSpaces.known_field(_V)
     sum_factor = self.operands[idx]
     _a = self.operands[:idx]
     _c = self.operands[idx+1:]
     _i = _a.num_elements()
     _k = _c.num_elements()
     if isinstance(sum_factor, VecAdd):
         _b = sum_factor.operands
         _V = VecSpaces.known_vec_space(self, field=field)
         _j = _b.num_elements()
         # use preserve_all=True in the following instantiation
         # because the instantiation is an intermediate step;
         # otherwise auto_simplification can over-do things
         impl = tensor_prod_distribution_over_add.instantiate(
             {K:_K, i:_i, j:_j, k:_k, V:_V, a:_a, b:_b, c:_c},
             preserve_all=True)
         return impl.derive_consequent().with_wrapping_at()
     elif isinstance(sum_factor, VecSum):
         _b = sum_factor.indices
         _j = _b.num_elements()
         _f = Lambda(sum_factor.indices, sum_factor.summand)
         _Q = Lambda(sum_factor.indices, sum_factor.condition)
         # use preserve_all=True in the following instantiation
         # because the instantiation is an intermediate step;
         # otherwise auto_simplification can over-do things
         impl = tensor_prod_distribution_over_summation.instantiate(
                 {K:_K, f:_f, Q:_Q, i:_i, j:_j, k:_k, 
                  V:_V, a:_a, b:_b, c:_c}, preserve_all=True)
         return impl.derive_consequent().with_wrapping_at()
     else:
         raise ValueError(
             "Don't know how to distribute tensor product over " +
             str(sum_factor.__class__) + " factor")
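The docstring's example written out as a hypothetical script (memberships and the expected judgment are illustrative assumptions):

# Minimal sketch, assuming a, b, c, d are vectors in a common vector space V
# over a field K.
from proveit import defaults, a, b, c, d, V, K
from proveit.logic import InSet
from proveit.linear_algebra import VecSpaces, TensorProd, VecAdd

defaults.assumptions = [InSet(vec, V) for vec in (a, b, c, d)] + [
        InSet(V, VecSpaces(K))]
tens_prod = TensorProd(a, VecAdd(b, c), d)
# Expected (if the prerequisites are provable):
# |- TensorProd(a, b+c, d) = TensorProd(a, b, d) + TensorProd(a, c, d)
dist_eq = tens_prod.distribution(1, field=K)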
Code example #7
File: scalar_mult.py  Project: PyProveIt/Prove-It
    def scalar_one_elimination(self, **defaults_config):
        '''
        Equivalence method that derives a simplification in which
        a single scalar multiplier of 1 is eliminated.
        For example, letting v denote a vector element of some VecSpace,
        then ScalarMult(one, v).scalar_one_elimination() would return
            |- ScalarMult(one, v) = v
        Will need to know that v is in VecSpaces(K) and that the
        multiplicative identity 1 is in the field K. This might require
        assumptions or pre-proving of a VecSpace that contains the
        vector appearing in the ScalarMult expression.
        '''
        from proveit.numbers import one
        # from . import elim_one_left, elim_one_right
        from . import one_as_scalar_mult_id

        # The following seems silly -- if the scalar is not 1, just
        # return with no change instead?
        if self.scalar != one:
            raise ValueError(
                "For ScalarMult.scalar_one_elimination(), the scalar "
                "multiplier in {0} was expected to be 1 but instead "
                "was {1}.".format(self, self.scalar))

        # obtain the instance params:
        # _K is the field; _V is the vector space over field _K;
        # and _v is the vector in _V being scaled
        _K, _v, _V = one_as_scalar_mult_id.all_instance_params()

        # isolate the vector portion
        _v_sub = self.scaled

        # Find a containing vector space V in the theorem.
        # This may fail!
        _V_sub = list(VecSpaces.yield_known_vec_spaces(_v_sub))[0]

        # Find a vec space field, hopefully one that contains the mult
        # identity 1. This may fail!
        _K_sub = list(VecSpaces.yield_known_fields(_V_sub))[0]

        # If we made it this far, can probably instantiate the theorem
        # (although it could still fail if 1 is not in the field _K_sub)
        return one_as_scalar_mult_id.instantiate({
            _K: _K_sub,
            _v: _v_sub,
            _V: _V_sub
        })
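A hypothetical usage sketch (the assumptions below are mine; the method itself needs the memberships to already be provable, and 1 must be in the field):

# Minimal sketch, assuming v is a vector in a complex vector space V,
# so that the multiplicative identity 1 is in the field.
from proveit import defaults, v, V
from proveit.logic import InSet
from proveit.numbers import one, Complex
from proveit.linear_algebra import VecSpaces, ScalarMult

defaults.assumptions = [InSet(v, V), InSet(V, VecSpaces(Complex))]
# Expected (if the prerequisites are provable): |- ScalarMult(one, v) = v
elim = ScalarMult(one, v).scalar_one_elimination()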
Code example #8
 def deduce_in_vec_space(self, vec_space=None, *, field,
                         **defaults_config):
     '''
     Deduce that the tensor product of vectors is in a vector space
     which is the tensor product of the corresponding vector spaces.
     '''
     from . import tensor_prod_is_in_tensor_prod_space
     _a = self.operands
     _i = _a.num_elements()
     _K = VecSpaces.get_field(field)
     vec_spaces = VecSpaces.known_vec_spaces(self.operands, field=_K)
     membership = tensor_prod_is_in_tensor_prod_space.instantiate(
             {K: _K, i: _i, V: vec_spaces, a: _a})      
     if vec_space is not None and membership.domain != vec_space:
         sub_rel = SubsetEq(membership.domain, vec_space)
         return sub_rel.derive_superset_membership(self)
     return membership
Code example #9
 def scalar_factorization(self, idx=None, *, field=None,
                          **defaults_config):
     '''
     Prove the factorization of a scalar from one of the tensor 
     product operands and return the original tensor product equal 
     to the factored version.  If idx is provided, it will specify 
     the (0-based) index location of the ScalarMult operand with the
     multiplier to factor out.  If no idx is provided, the first 
     ScalarMult operand will be targeted.
     
     For example,
         TensorProd(a, ScalarMult(c, b), d).scalar_factorization(1)
     returns
         |- TensorProd(a, ScalarMult(c, b), d) =
            ScalarMult(c, TensorProd(a, b, d))
     
     As a prerequisite, the operands must be known to be vectors in
     vector spaces over a common field which contains the scalar
     multiplier being factored.  If the field is not specified,
     then VecSpaces.default_field is used.
     '''
     from . import factor_scalar_from_tensor_prod
     if idx is None:
         for _k, operand in enumerate(self.operands):
             if isinstance(operand, ScalarMult):
                 idx = _k
                 break
     elif idx < 0:
         # use wrap-around indexing
         idx = self.operands.num_entries() + idx
     if not isinstance(self.operands[idx], ScalarMult):
         raise TypeError("Expected the 'operand' and 'operand_idx' to be "
                         "a ScalarMult")            
     _V = VecSpaces.known_vec_space(self, field=field)
     _K = VecSpaces.known_field(_V)
     _alpha = self.operands[idx].scalar
     _a = self.operands[:idx]
     _b = self.operands[idx].scaled
     _c = self.operands[idx+1:]
     _i = _a.num_elements()
     _k = _c.num_elements()
     impl = factor_scalar_from_tensor_prod.instantiate(
             {K:_K, alpha:_alpha, i:_i, k:_k, V:_V, a:_a, b:_b, c:_c})
     return impl.derive_consequent().with_wrapping_at()
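The docstring's example as a hypothetical script (names, memberships, and the expected judgment are illustrative assumptions):

# Minimal sketch, assuming a, b, d are vectors in a common vector space V
# over a field K that contains the scalar c.
from proveit import defaults, a, b, c, d, V, K
from proveit.logic import InSet
from proveit.linear_algebra import VecSpaces, ScalarMult, TensorProd

defaults.assumptions = [InSet(a, V), InSet(b, V), InSet(d, V),
                        InSet(V, VecSpaces(K)), InSet(c, K)]
tens_prod = TensorProd(a, ScalarMult(c, b), d)
# Expected (if the prerequisites are provable):
# |- TensorProd(a, ScalarMult(c, b), d) = ScalarMult(c, TensorProd(a, b, d))
factored = tens_prod.scalar_factorization(1, field=K)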
Code example #10
File: scalar_mult.py  Project: PyProveIt/Prove-It
 def deduce_in_vec_space(self, vec_space=None, *, field, **defaults_config):
     '''
     Prove that this scaled vector is in a vector space.  The vector
     space may be specified or inferred via known memberships.  
     A field for the vector space must be specified.
     '''
     from . import scalar_mult_closure
     if vec_space is None:
         # No vector space given, so we'll have to look for
         # a known membership of 'scaled' in a vector space.
         # This may be arbitrarily chosen.
         vec_space = VecSpaces.known_vec_space(self.scaled, field=field)
         field = VecSpaces.known_field(vec_space)
     return scalar_mult_closure.instantiate({
         K: field,
         V: vec_space,
         a: self.scalar,
         x: self.scaled
     })
Code example #11
File: tensor_exp.py  Project: PyProveIt/Prove-It
 def derive_cartexp_membership(self, **defaults_config):
     '''
     Derive membership in the CartExp which is a superset of the
     TensorExp.
     '''
     from . import tensor_exp_inclusion, tensor_exp_of_cart_exp_inclusion
     if isinstance(self.domain.base, CartExp):
         _V = self.domain.base
         _K = VecSpaces.known_field(_V)
         _m = self.domain.base.exponent
         _n = self.domain.exponent
         inclusion = tensor_exp_of_cart_exp_inclusion.instantiate(
                 {V:_V, K:_K, m:_m, n:_n})
     else:
         _V = self.domain.base
         _K = VecSpaces.known_field(_V)
         _n = self.domain.exponent
         inclusion = tensor_exp_inclusion.instantiate(
                 {V:_V, K:_K, n:_n})
     return inclusion.derive_superset_membership(self.element)
Code example #12
File: inner_prod.py  Project: PyProveIt/Prove-It
 def shallow_simplification(self, *, must_evaluate=False,
                            **defaults_config):
     '''
     Simplify via inner product linearity:
     <a x, y> = a <x, y>
     <x, y> = <x, a y>
     <x + y, z> = <x, z> + <y, z>
     <x, y + z> = <x, y> + <x, z>
     '''
     from proveit.linear_algebra import VecSpaces, ScalarMult, VecAdd
     from proveit.linear_algebra.inner_products import (
             inner_prod_scalar_mult_left, inner_prod_scalar_mult_right,
             inner_prod_vec_add_left, inner_prod_vec_add_right)
     _u, _v = self.operands
     try:
         vec_space = VecSpaces.common_known_vec_space((_u, _v))
     except UnsatisfiedPrerequisites:
          # No known common vector space for the operands, so
          # there is no specific shallow_simplification we can do here.
         return Operation.shallow_simplification(
                 self, must_evaluate=must_evaluate)
     field = VecSpaces.known_field(vec_space)
     simp = None
     if isinstance(_u, ScalarMult):
         simp = inner_prod_scalar_mult_left.instantiate(
                 {K:field, H:vec_space, a:_u.scalar, x:_u.scaled, y:_v})
     if isinstance(_v, ScalarMult):
         simp = inner_prod_scalar_mult_right.instantiate(
                 {K:field, H:vec_space, a:_v.scalar, x:_u, y:_v.scaled})
     if isinstance(_u, VecAdd):
         simp = inner_prod_vec_add_left.instantiate(
                 {K:field, H:vec_space, x:_u.terms[0], y:_u.terms[1], z:_v})
     if isinstance(_v, VecAdd):
         simp = inner_prod_vec_add_right.instantiate(
                 {K:field, H:vec_space, x:_u, y:_v.terms[0], z:_v.terms[1]})
     if simp is None:
         return Operation.shallow_simplification(
                 self, must_evaluate=must_evaluate)
     if must_evaluate and not is_irreducible_value(simp.rhs):
         return simp.inner_expr().rhs.evaluate()
     return simp
Code example #13
 def insert_vec_on_both_sides_of_equals(tensor_equality, idx, vec,
                                        rhs_idx = None, *,
                                        field = None, 
                                        **defaults_config):
     '''
     From an equality with tensor products of vectors on
     both sides, derive a similar equality but with a vector 
     operand inserted at the particular given zero-based index (idx).
      A different index may be specified for the right side than for
      the left side by setting rhs_idx (i.e., if entries don't line up
      due to differences in ExprRange entries); the default is to use
      the same index on both sides.
     '''
     from . import insert_vec_on_both_sides_of_equality
     # First check various characteristics of the tensor_equality
     tensor_equality = TensorProd._check_tensor_equality(
             tensor_equality, allow_unary=True)
     if idx < 0:
         # use wrap-around indexing
          idx = tensor_equality.lhs.operands.num_entries() + idx
     if rhs_idx is None:
         rhs_idx = idx # use the same index on both sides by default
     _a = tensor_equality.lhs.operands[:idx]
     _b = vec
     _c = tensor_equality.lhs.operands[idx:]
     _d = tensor_equality.rhs.operands[:rhs_idx]
     _e = tensor_equality.rhs.operands[rhs_idx:]
     _i = _a.num_elements()
     _k = _c.num_elements()
     vec_space = VecSpaces.known_vec_space(tensor_equality.lhs, 
                                           field=field)
     _K = VecSpaces.known_field(vec_space)
     _U = VecSpaces.known_vec_spaces(_a, field=_K)
     _V = VecSpaces.known_vec_space(_b, field=_K)
     _W = VecSpaces.known_vec_spaces(_c, field=_K)
     impl = insert_vec_on_both_sides_of_equality.instantiate(
             {K:_K, i:_i, k:_k, U:_U, V:_V, W:_W, 
              a:_a, b:_b, c:_c, d:_d, e:_e})
     return impl.derive_consequent().with_mimicked_style(tensor_equality)
Code example #14
 def compute_norm(self, **defaults_config):
     '''
     Proves ‖x + y‖ = sqrt(‖x‖^2 + ‖y‖^2) if the inner product 
     of x and y is zero.
     '''
     from proveit.linear_algebra import InnerProd, ScalarMult
     from . import norm_of_sum_of_orthogonal_pair
     if self.operands.is_double():
         _a, _b = self.operands
         _x = _a.scaled if isinstance(_a, ScalarMult) else _a
         _y = _b.scaled if isinstance(_b, ScalarMult) else _b
         if Equals(InnerProd(_x, _y), zero).proven():
             vec_space = VecSpaces.known_vec_space(_a)
             field = VecSpaces.known_field(vec_space)
             return norm_of_sum_of_orthogonal_pair.instantiate({
                 K: field,
                 V: vec_space,
                 a: _a,
                 b: _b
             })
     raise NotImplementedError(
         "VecAdd.compute_norm is only implemented for an "
         "orthogonal pair of vectors")
Code example #15
File: vec_sum.py  Project: PyProveIt/Prove-It
    def deduce_in_vec_space(self, vec_space=None, *, field=None,
                            **defaults_config):
        '''
        Prove that this vector summation is in a vector space.
        '''
        from . import summation_closure
        if vec_space is None:
            with defaults.temporary() as tmp_defaults:
                tmp_defaults.assumptions = (defaults.assumptions + 
                                            self.conditions.entries)
                vec_space = VecSpaces.known_vec_space(self.summand, 
                                                      field=field)

        _V = vec_space
        _K = VecSpaces.known_field(_V)
        _b = self.indices
        _j = _b.num_elements()
        _f = Lambda(self.indices, self.summand)
        _Q = Lambda(self.indices, self.condition)
        return summation_closure.instantiate(
                {j:_j, K:_K, f:_f, Q:_Q, V:_V, b:_b}).derive_consequent()
Code example #16
File: qmult.py  Project: PyProveIt/Prove-It
 def deduce_in_vec_space(self, vec_space=None, *, field, **defaults_config):
     '''
     Prove that this Qmult is in a vector space (e.g., if it is
     a ket).
     '''
     from proveit.physics.quantum import QmultCodomain
     # In the process of proving that 'self' is in QmultCodomain,
     # it will prove it is a vector in a Hilbert space if
     # appropriate.
     QmultCodomain.membership_object(self).conclude()
     if vec_space is not None:
         return InSet(self, vec_space).prove()
     return InSet(self, VecSpaces.known_vec_space(self,
                                                  field=field)).prove()
Code example #17
 def yield_known_hilbert_spaces(vec):
     '''
     Given a vector expression, vec, yield any Hilbert spaces
     known to contain vec.
     '''
     for vec_space in VecSpaces.yield_known_vec_spaces(vec, field=Complex):
         if vec_space in HilbertSpacesLiteral.known_spaces_memberships:
             # This vector space is already known to be an inner
             # product space.
             yield vec_space
         else:
             try:
                 deduce_as_hilbert_space(vec_space)
                 yield vec_space
             except NotImplementedError:
                  # Not known how to prove 'vec_space' is an inner
                 # product space.
                 pass
Code example #18
File: vec_sum.py  Project: PyProveIt/Prove-It
    def vec_sum_elimination(self, field=None, **defaults_config):
        '''
        For a VecSum in which the summand does not depend on the 
        summation index, return an equality between this VecSum and
        the equivalent expression in which the VecSum is eliminated.
        For example, suppose self = VecSum(i, v, Interval(2, 4)).
        Then self.vec_sum_elimination() would return
        |- self = 3*v
        where the 3*v is actually ScalarMult(3, v).
        The method works only for a VecSum over a single summation
        index, and simply returns self = self if the VecSum elimination
        is not possible due to the summand being dependent on the
        index of summation.
        '''

        expr = self
        summation_index = expr.index
        eq = TransRelUpdater(expr)

        if summation_index not in free_vars(expr.summand):
            vec_space_membership = expr.summand.deduce_in_vec_space(
                field=field,
                assumptions = defaults.assumptions + expr.conditions.entries)
            _V_sub = vec_space_membership.domain
            _K_sub = VecSpaces.known_field(_V_sub)
            _j_sub = expr.condition.domain.lower_bound
            _k_sub = expr.condition.domain.upper_bound
            _v_sub = expr.summand
            from proveit.linear_algebra.addition import vec_sum_of_constant_vec
            eq.update(vec_sum_of_constant_vec.instantiate(
                    {V: _V_sub, K: _K_sub, j: _j_sub, k: _k_sub, v: _v_sub}))

        else:
            print("VecSum cannot be eliminated. The summand {0} appears "
                  "to depend on the index of summation {1}".
                  format(expr.summand, summation_index))

        return eq.relation
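The docstring's constant-summand example as a hypothetical script (the VecSum constructor signature and the memberships are assumptions of this sketch):

# Minimal sketch, assuming v is a vector in a vector space V over K and
# does not depend on the summation index i.
from proveit import defaults, i, v, V, K
from proveit.logic import InSet
from proveit.numbers import Interval, two, four
from proveit.linear_algebra import VecSpaces, VecSum

defaults.assumptions = [InSet(v, V), InSet(V, VecSpaces(K))]
vec_sum = VecSum(i, v, domain=Interval(two, four))
# Expected (if the prerequisites are provable): |- vec_sum = ScalarMult(3, v)
elim_eq = vec_sum.vec_sum_elimination()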
Code example #19
File: vec_sum.py  Project: PyProveIt/Prove-It
    def tensor_prod_factoring(self, idx=None, idx_beg=None, idx_end=None,
                              field=None, **defaults_config):
        '''
        For a VecSum with a TensorProd summand or ScalarMult summand
        with a scaled attribute being a TensorProd, factor out from
        the VecSum the TensorProd vectors other than the ones indicated
        by the (0-based) idx, or idx_beg and idx_end pair and return
        an equality between the original VecSum and the new TensorProd.
        For example, we could take the VecSum defined by
        vec_sum = VecSum(TensorProd(x, f(i), y, z))
        and call vec_sum.tensor_prod_factoring(idx_beg=1, idx_end=2)
        to obtain:

            |- VecSum(TensorProd(x, f(i), y, z)) = 
               TensorProd(x, VecSum(TensorProd(f(i), y)), z)

        This method should work even if the summand is a nested
        ScalarMult. Note that any vectors inside the TensorProd that
        depend on the index of summation cannot be pulled out of the
        VecSum and thus will cause the method to fail if not chosen
        to remain inside the VecSum. If all idx args are 'None',
        method will factor out all possible vector factors, including
        the case where all factors could be removed and the VecSum
        eliminated entirely.
        Note that this method only works when self has a single
        index of summation.
        '''
        expr = self
        the_summand = self.summand

        eq = TransRelUpdater(expr)

        # Check that 
        #    (1) the_summand is a TensorProd
        # or (2) the_summand is a ScalarMult;
        # otherwise, this method does not apply
        from proveit.linear_algebra import ScalarMult, TensorProd
        if isinstance(the_summand, ScalarMult):
            # try shallow simplification first to remove nested
            # ScalarMults and multiplicative identities
            expr = eq.update(expr.inner_expr().summand.shallow_simplification())
            the_summand = expr.summand
        if isinstance(the_summand, TensorProd):
            tensor_prod_expr = the_summand
            tensor_prod_summand = True
            tensor_prod_factors_list = list(
                    the_summand.operands.entries)
        elif (isinstance(the_summand, ScalarMult)
              and isinstance(the_summand.scaled, TensorProd)):
            tensor_prod_expr = the_summand.scaled
            tensor_prod_summand = False
            tensor_prod_factors_list = list(
                    the_summand.scaled.operands.entries)
        else:
            raise ValueError(
                "tensor_prod_factoring() requires the VecSum summand "
                "to be a TensorProd or a ScalarMult (with its 'scaled' "
                "attribute a TensorProd); instead the "
                "summand is {}".format(self.instance_expr))

        if idx is None and idx_beg is None and idx_end is None:
            # prepare to take out all possible factors, including
            # the complete elimination of the VecSum if possible
            if expr.index not in free_vars(expr.summand):
                # summand does not depend on index of summation
                # so we can eliminate the VecSum entirely
                return expr.vec_sum_elimination(field=field)
            if expr.index in free_vars(tensor_prod_expr):
                # identify the extractable vs. non-extractable
                # TensorProd factors (and there must be at least
                # one such non-extractable factor)
                
                idx_beg = -1
                idx_end = -1
                for i in range(len(expr.summand.operands.entries)):
                    if expr.index in free_vars(tensor_prod_expr.operands[i]):
                        if idx_beg == -1:
                            idx_beg = i
                            idx_end = idx_beg
                        else:
                            idx_end = i
            else:
                # The alternative is that the summand is
                # a ScalarMult with the scalar (but not the scaled)
                # being dependent on the index of summation. It's not
                # obvious what's best to do in this case, but we set
                # things up to factor out all but the last of the
                # TensorProd factors (so we'll factor out at least
                # 1 factor)
                idx_beg = len(tensor_prod_expr.operands.entries) - 1
                idx_end = idx_beg


        # Check that the provided idxs are within bounds
        # (it should refer to an actual TensorProd operand)

        num_vec_factors = len(tensor_prod_factors_list)
        if idx is not None and idx >= num_vec_factors:
            raise ValueError(
                    "idx value {0} provided for tensor_prod_factoring() "
                    "method is out-of-bounds; the TensorProd summand has "
                    "{1} factors: {2}, and thus possibly indices 0-{3}".
                    format(idx, len(tensor_prod_factors_list),
                           tensor_prod_factors_list,
                           len(tensor_prod_factors_list)-1))
        if idx_beg is not None and idx_end is not None:
            if (idx_end < idx_beg or idx_beg >= num_vec_factors or
                idx_end >= num_vec_factors):
                raise ValueError(
                    "idx_beg value {0} or idx_end value {1} (or both) "
                    "provided for tensor_prod_factoring() "
                    "method is/are out-of-bounds; the TensorProd summand "
                    "has {2} factors: {3}, and thus possibly indices 0-{3}".
                    format(idx_beg, idx_end, num_vec_factors,
                           tensor_prod_factors_list,num_vec_factors-1))
        if idx is not None:
            # take single idx as the default
            idx_beg = idx
            idx_end = idx

        # Check that the TensorProd factors to be factored out do not
        # rely on the VecSum index of summation
        summation_index = expr.index
        for i in range(num_vec_factors):
            if i < idx_beg or i > idx_end:
                the_factor = tensor_prod_factors_list[i]
                if summation_index in free_vars(the_factor):
                    raise ValueError(
                            "TensorProd factor {0} cannot be factored "
                            "out of the given VecSum summation because "
                            "it is a function of the summation index {1}.".
                            format(the_factor, summation_index))
        
        # Everything checks out as best we can tell, so prepare to
        # import and instantiate the appropriate theorem,
        # depending on whether:
        # (1) the_summand is a TensorProd, or
        # (2) the_summand is a ScalarMult (with a TensorProd 'scaled')
        if tensor_prod_summand:
            from proveit.linear_algebra.tensors import (
                tensor_prod_distribution_over_summation)
        else:
            from proveit.linear_algebra.tensors import (
                tensor_prod_distribution_over_summation_with_scalar_mult)
        if idx_beg != idx_end:
            # need to associate the elements and change idx value
            # but process is slightly different in the two cases
            if tensor_prod_summand:
                expr = eq.update(expr.inner_expr().summand.association(
                        idx_beg, idx_end-idx_beg+1))
                tensor_prod_expr = expr.summand
            else:
                expr = eq.update(expr.inner_expr().summand.scaled.association(
                        idx_beg, idx_end-idx_beg+1))
                tensor_prod_expr = expr.summand.scaled
        idx = idx_beg

        from proveit import K, f, Q, i, j, k, V, a, b, c, s
        # actually, maybe it doesn't matter and we can deduce the 
        # vector space regardless: (Adding this temp 12/26/21)
        vec_space_membership = expr.summand.deduce_in_vec_space(
            field=field,
            assumptions = defaults.assumptions + expr.conditions.entries)
        _V_sub = vec_space_membership.domain
        # Substitutions regardless of Case
        _K_sub = VecSpaces.known_field(_V_sub)
        _b_sub = expr.indices
        _j_sub = _b_sub.num_elements()
        _Q_sub = Lambda(expr.indices, expr.condition)
        # Case-specific substitutions, using updated tensor_prod_expr:
        _a_sub = tensor_prod_expr.operands[:idx]
        _c_sub = tensor_prod_expr.operands[idx+1:]
        _f_sub = Lambda(expr.indices, tensor_prod_expr.operands[idx])
        if not tensor_prod_summand:
            _s_sub = Lambda(expr.indices, expr.summand.scalar)
        # Case-dependent substitutions:
        _i_sub = _a_sub.num_elements()
        _k_sub = _c_sub.num_elements()

        if tensor_prod_summand:
            impl = tensor_prod_distribution_over_summation.instantiate(
                    {K:_K_sub, f:_f_sub, Q:_Q_sub, i:_i_sub, j:_j_sub,
                     k:_k_sub, V:_V_sub, a:_a_sub, b:_b_sub, c:_c_sub},
                     preserve_expr=expr)
        else:
            impl = (tensor_prod_distribution_over_summation_with_scalar_mult.
                   instantiate(
                    {K:_K_sub, f:_f_sub, Q:_Q_sub, i:_i_sub, j:_j_sub,
                     k:_k_sub, V:_V_sub, a:_a_sub, b:_b_sub, c:_c_sub,
                     s: _s_sub}, preserve_expr=expr))

        expr = eq.update(impl.derive_consequent(
                assumptions = defaults.assumptions + expr.conditions.entries).
                derive_reversed())

        return eq.relation
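The docstring's example as a hypothetical script (f(i) stands for the one factor that depends on the summation index; names, domain, and memberships are assumptions of this sketch):

# Minimal sketch, assuming x, y, z are index-independent vectors in a
# vector space V over K.
from proveit import defaults, Function, f, i, x, y, z, V, K
from proveit.logic import InSet
from proveit.numbers import Interval, one, four
from proveit.linear_algebra import VecSpaces, VecSum, TensorProd

defaults.assumptions = [InSet(vec, V) for vec in (x, y, z)] + [
        InSet(V, VecSpaces(K))]
f_i = Function(f, i)   # the factor that depends on the index of summation
vec_sum = VecSum(i, TensorProd(x, f_i, y, z), domain=Interval(one, four))
# Expected (if the prerequisites are provable):
# |- vec_sum = TensorProd(x, VecSum(i, TensorProd(f_i, y), ...), z)
factored = vec_sum.tensor_prod_factoring(idx_beg=1, idx_end=2)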
Code example #20
    def conclude(self, **defaults_config):
        '''
        Called on self = [elem in (A x B x ...)] (where x denotes
        a tensor product and elem = a x b x ...) and knowing or
        assuming that (a in A) and (b in B) and ..., derive and
        return self.
        '''
        if isinstance(self.element, TensorProd):
            from . import tensor_prod_is_in_tensor_prod_space
            from proveit.linear_algebra import VecSpaces
            # we will need the domain acknowledged as a VecSpace
            # so we can later get its underlying field
            self.domain.deduce_as_vec_space()
            _a_sub = self.element.operands
            _i_sub = _a_sub.num_elements()
            _K_sub = VecSpaces.known_field(self.domain)
            vec_spaces = self.domain.operands

            return tensor_prod_is_in_tensor_prod_space.instantiate({
                a: _a_sub,
                i: _i_sub,
                K: _K_sub,
                V: vec_spaces
            })

        if isinstance(self.element, ScalarMult):
            from proveit.linear_algebra.scalar_multiplication import (
                scalar_mult_closure)
            from proveit.linear_algebra import VecSpaces
            self.domain.deduce_as_vec_space()
            _V_sub = VecSpaces.known_vec_space(self.element.scaled, field=None)
            _K_sub = VecSpaces.known_field(_V_sub)
            _a_sub = self.element.scalar
            _x_sub = self.element.scaled
            return scalar_mult_closure.instantiate({
                V: _V_sub,
                K: _K_sub,
                a: _a_sub,
                x: _x_sub
            })

        if isinstance(self.element, VecSum):
            from proveit.linear_algebra.addition import summation_closure
            from proveit.linear_algebra import VecSpaces
            self.domain.deduce_as_vec_space()
            # might want to change the following to use
            # vec_space_membership = self.element.summand.deduce_in_vec_space()
            # then _V_sub = vec_space_membership.domain
            _V_sub = VecSpaces.known_vec_space(self.element.summand)
            _K_sub = VecSpaces.known_field(_V_sub)
            _b_sub = self.element.indices
            _j_sub = _b_sub.num_elements()
            _f_sub = Lambda(self.element.indices, self.element.summand)
            _Q_sub = Lambda(self.element.indices, self.element.condition)
            imp = summation_closure.instantiate({
                V: _V_sub,
                K: _K_sub,
                b: _b_sub,
                j: _j_sub,
                f: _f_sub,
                Q: _Q_sub
            })
            return imp.derive_consequent()

        raise ProofFailure(
            self, defaults.assumptions, "Element {0} is not a TensorProd, "
            "a ScalarMult, or a VecSum.".format(self.element))
Code example #21
File: vec_sum.py  Project: PyProveIt/Prove-It
    def factors_extraction(self, field=None, **defaults_config):
        '''
        Derive an equality between this VecSum and the result
        when all possible leading scalar factors have been extracted
        and moved to the front of the VecSum (for example, in the
        case where the summand of the VecSum is a ScalarMult) and
        all possible tensor product factors have been moved outside
        the VecSum (in front if possible, or afterward if necessary).
        For example, we could take the VecSum
            vec_sum = VecSum(ScalarMult(a, TensorProd(x, f(i), y))),
        where the index of summation is i, and call
            vec_sum.factors_extraction() to obtain:
            |- vec_sum = 
               ScalarMult(a, TensorProd(x, VecSum(f(i)), y))
        Note that any factors inside the summand that depend on the
        index of summation cannot be pulled out from inside the VecSum,
        and thus pose limitations on the result.
        Note that this method only works when self has a single
        index of summation, and only when self has a summand that is
        a ScalarMult or TensorProd.
        Later versions of this method should provide mechanisms to
        specify factors to extract from, and/or leave behind in, the
        VecSum.
        '''
        expr = self
        summation_index = expr.index
        assumptions_with_conditions = (
                defaults.assumptions + expr.conditions.entries)

        # for convenience in updating our equation:
        # this begins with eq.relation as expr = expr
        eq = TransRelUpdater(expr)

        # If the summand is a ScalarMult, perform a
        # shallow_simplification(), which will remove nested
        # ScalarMults and multiplicative identities. This is
        # intended to simplify without changing too much the
        # intent of the user. This might even transform the
        # ScalarMult object into something else.
        from proveit.linear_algebra import ScalarMult, TensorProd
        if isinstance(expr.summand, ScalarMult):
            expr = eq.update(
                    expr.inner_expr().summand.shallow_simplification())
        if isinstance(expr.summand, ScalarMult):
            # had to re-check, b/c the shallow_simplification might
            # have transformed the ScalarMult into the scaled object
            tensor_prod_summand = False # not clearly useful; review please
            the_scalar = expr.summand.scalar
            
        elif isinstance(expr.summand, TensorProd):
            tensor_prod_summand = True # not clearly useful; review please

        if isinstance(expr.summand, ScalarMult):
            if summation_index not in free_vars(expr.summand.scalar):
                # it doesn't matter what the scalar is; the whole thing
                # can be pulled out in front of the VecSum
                from proveit.linear_algebra.scalar_multiplication import (
                    distribution_over_vec_sum)
                summand_in_vec_space = expr.summand.deduce_in_vec_space(
                        field=field, assumptions=assumptions_with_conditions)
                _V_sub = summand_in_vec_space.domain
                _K_sub = VecSpaces.known_field(_V_sub)
                _b_sub = expr.indices
                _j_sub = _b_sub.num_elements()
                _f_sub = Lambda(expr.indices, expr.summand.scaled)
                _Q_sub = Lambda(expr.indices, expr.condition)
                _k_sub = expr.summand.scalar
                imp = distribution_over_vec_sum.instantiate(
                        {V: _V_sub, K: _K_sub, b: _b_sub, j: _j_sub,
                         f: _f_sub, Q: _Q_sub, k: _k_sub},
                         assumptions=assumptions_with_conditions)
                expr = eq.update(imp.derive_consequent(
                    assumptions=assumptions_with_conditions).derive_reversed())
            else:
                # The scalar portion is dependent on summation index.
                # If the scalar itself is a Mult of things, go through
                # and pull to the front of the Mult all individual
                # factors that are not dependent on the summation index.
                if isinstance(expr.summand.scalar, Mult):
                    # Repeatedly pull index-independent factors #
                    # to the front of the Mult factors          #

                    # prepare to count the extractable and
                    # unextractable factors
                    _num_factored = 0
                    _num_unfactored = len(expr.summand.scalar.operands.entries)

                    # go through factors from back to front
                    for the_factor in reversed(
                            expr.summand.scalar.operands.entries):

                        if summation_index not in free_vars(the_factor):
                            expr = eq.update(
                                expr.inner_expr().summand.scalar.factorization(
                                    the_factor,
                                    assumptions=assumptions_with_conditions,
                                    preserve_all=True))
                            _num_factored += 1
                            _num_unfactored -= 1

                    # group the factorable factors
                    if _num_factored > 0:
                        expr = eq.update(
                            expr.inner_expr().summand.scalar.association(
                                0, _num_factored,
                                assumptions=assumptions_with_conditions,
                                preserve_all=True))
                    # group the unfactorable factors
                    if _num_unfactored > 1:
                        expr = eq.update(
                            expr.inner_expr().summand.scalar.association(
                                1, _num_unfactored,
                                assumptions=assumptions_with_conditions,
                                preserve_all=True))

                    # finally, extract any factorable scalar factors
                    if _num_factored > 0:
                        from proveit.linear_algebra.scalar_multiplication import (
                                distribution_over_vec_sum_with_scalar_mult)
                        # Mult._simplification_directives_.ungroup = False
                        # _V_sub = VecSpaces.known_vec_space(expr, field=field)
                        summand_in_vec_space = (
                                expr.summand.deduce_in_vec_space(
                                        field = field,
                                        assumptions =
                                        assumptions_with_conditions))
                        _V_sub = summand_in_vec_space.domain
                        _K_sub = VecSpaces.known_field(_V_sub)
                        _b_sub = expr.indices
                        _j_sub = _b_sub.num_elements()
                        _f_sub = Lambda(expr.indices, expr.summand.scaled)
                        _Q_sub = Lambda(expr.indices, expr.condition)
                        _c_sub = Lambda(expr.indices,
                                        expr.summand.scalar.operands[1])
                        _k_sub = expr.summand.scalar.operands[0]
                        # when instantiating, we set preserve_expr=expr;
                        # otherwise auto_simplification disassociates inside
                        # the Mult.
                        impl = distribution_over_vec_sum_with_scalar_mult.instantiate(
                                {V:_V_sub, K:_K_sub, b: _b_sub, j: _j_sub,
                                 f: _f_sub, Q: _Q_sub, c:_c_sub, k: _k_sub},
                                 preserve_expr=expr,
                                assumptions=assumptions_with_conditions)
                        expr = eq.update(impl.derive_consequent(
                                assumptions=assumptions_with_conditions).
                                derive_reversed())

                else:
                    # The scalar component is dependent on summation
                    # index but is not a Mult.
                    # Revert everything and return self = self.
                    print("Found summation index {0} in the scalar {1} "
                          "and the scalar is not a Mult object.".
                      format(summation_index, expr.summand.scalar))
                    eq = TransRelUpdater(self)

        # ============================================================ #
        # VECTOR FACTORS                                               #
        # ============================================================ #
        # After the scalar factors (if any) have been dealt with,
        # proceed with the vector factors in any remaining TensorProd
        # in the summand.
        # Notice that we are not guaranteed at this point that we even
        # have a TensorProd to factor, and if we do have a TensorProd
        # we have not identified the non-index-dependent factors to 
        # extract.
        # After processing above for scalar factors, we might now have
        # (1) expr = VecSum (we didn't find scalar factors to extract),
        # inside of which we might have a ScalarMult or a TensorProd;
        # or (2) expr = ScalarMult (we found some scalar factors to
        # extract), with a VecSum as the scaled component.

        if isinstance(expr, VecSum):
            expr = eq.update(expr.tensor_prod_factoring())
        elif isinstance(expr, ScalarMult) and isinstance(expr.scaled, VecSum):
            expr = eq.update(expr.inner_expr().scaled.tensor_prod_factoring())

        return eq.relation
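The docstring's example as a hypothetical script (again, the names, domain, and memberships are assumptions of this sketch):

# Minimal sketch, assuming x and y are index-independent vectors in a
# vector space V over K, and a is an index-independent scalar in K.
from proveit import defaults, Function, a, f, i, x, y, V, K
from proveit.logic import InSet
from proveit.numbers import Interval, one, four
from proveit.linear_algebra import VecSpaces, VecSum, ScalarMult, TensorProd

defaults.assumptions = [InSet(x, V), InSet(y, V), InSet(V, VecSpaces(K)),
                        InSet(a, K)]
f_i = Function(f, i)   # the factor that depends on the index of summation
vec_sum = VecSum(i, ScalarMult(a, TensorProd(x, f_i, y)),
                 domain=Interval(one, four))
# Expected (if the prerequisites are provable):
# |- vec_sum = ScalarMult(a, TensorProd(x, VecSum(i, f_i, ...), y))
extracted = vec_sum.factors_extraction()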
Code example #22
    def factorization(self,
                      the_factor,
                      pull="left",
                      group_factors=True,
                      field=None,
                      **defaults_config):
        '''
        Deduce an equality between this VecAdd expression and a
        version in which either:
        (1) the scalar factor the_factor has been factored out in
            front (or possibly out behind) to produce a new ScalarMult;
        OR
        (2) the tensor product factor the_factor has been factored
            out in front (or possibly out behind) to produce a new
            TensorProd.
        For example, if
            x = VecAdd(ScalarMult(a, v1), ScalarMult(a, v2))
        then x.factorization(a) produces:
            |- x = ScalarMult(a, VecAdd(v1, v2)).
        Prove-It will need to know or be able to derive a vector space
        in which the vectors live.
        This method only works if the terms of the VecAdd are all
        ScalarMult objects or all TensorProd objects.
        In the case of all ScalarMult objects, any nested ScalarMult
        objects are first flattened if possible.
        Note: In the case of a VecAdd of all TensorProd objects,
        the lack of commutativity for tensor products limits any
        factorable tensor product factors to those occurring on the
        far left or far right of each tensor product term. Thus, for
        example, if
        x = VecAdd(TensorProd(v1, v2, v3), TensorProd(v1, v4, v5))
        we can call x.factorization(v1) to obtain
        |- x =
        TensorProd(v1, VecAdd(TensorProd(v2, v3), TensorProd(v4, v5))),
        but we cannot factor v1 out of the expression
        y = VecAdd(TensorProd(v2, v1, v3), TensorProd(v4, v1, v5))
        '''

        expr = self
        eq = TransRelUpdater(expr)

        replacements = list(defaults.replacements)

        from proveit.linear_algebra import ScalarMult, TensorProd
        from proveit.numbers import one, Mult

        # Case (1) VecAdd(ScalarMult, ScalarMult, ..., ScalarMult)
        if all(isinstance(op, ScalarMult) for op in self.operands):
            # look for the_factor in each scalar;
            # code based on Add.factorization()
            _b = []
            for _i in range(expr.terms.num_entries()):
                # remove nesting of ScalarMults
                term = expr.terms[_i].shallow_simplification().rhs
                expr = eq.update(
                    expr.inner_expr().terms[_i].shallow_simplification())
                # simplify the scalar part of the ScalarMult
                term = term.inner_expr().scalar.shallow_simplification().rhs
                expr = eq.update(expr.inner_expr().terms[_i].scalar.
                                 shallow_simplification())
                if hasattr(term.scalar, 'factorization'):
                    term_scalar_factorization = term.scalar.factorization(
                        the_factor,
                        pull,
                        group_factors=group_factors,
                        group_remainder=True,
                        preserve_all=True)
                    if not isinstance(term_scalar_factorization.rhs, Mult):
                        raise ValueError(
                            "Expecting right hand side of each factorization "
                            "to be a product. Instead obtained: {}".format(
                                term_scalar_factorization.rhs))
                    if pull == 'left':
                        # the grouped remainder on the right
                        _b.append(
                            ScalarMult(
                                term_scalar_factorization.rhs.operands[-1],
                                term.scaled))
                    else:
                        # the grouped remainder on the left
                        _b.append(
                            ScalarMult(
                                term_scalar_factorization.rhs.operands[0],
                                term.scaled))
                    # substitute in the factorized term
                    expr = eq.update(
                        term_scalar_factorization.substitution(
                            expr.inner_expr().terms[_i].scalar,
                            preserve_all=True))
                else:
                    if term.scalar != the_factor:
                        raise ValueError(
                            "Factor, %s, is not present in the term at "
                            "index %d of %s!" % (the_factor, _i, self))
                    if pull == 'left':
                        replacements.append(
                            Mult(term.scalar, one).one_elimination(1))
                    else:
                        replacements.append(
                            Mult(one, term.scalar).one_elimination(0))
                    _b.append(ScalarMult(one, term.scaled))

            if not group_factors and isinstance(the_factor, Mult):
                factor_sub = the_factor.operands
            else:
                factor_sub = ExprTuple(the_factor)

            # pull left/right not really relevant for the ScalarMult
            # cases; this simplification step still seems relevant
            if defaults.auto_simplify:
                # Simplify the remainder of the factorization if
                # auto-simplify is enabled.
                replacements.append(VecAdd(*_b).simplification())

            from proveit import K, i, k, V, a
            # Perhaps here we could search through the operands to find
            # an appropriate VecSpace? Or maybe it doesn't matter?
            vec_space_membership = expr.operands[0].deduce_in_vec_space(
                field=field)
            _V_sub = vec_space_membership.domain
            _K_sub = VecSpaces.known_field(_V_sub)
            _i_sub = expr.operands.num_elements()
            _k_sub = the_factor
            _a_sub = ExprTuple(*_b)

            from proveit.linear_algebra.scalar_multiplication import (
                distribution_over_vectors)
            distribution = distribution_over_vectors.instantiate(
                {
                    V: _V_sub,
                    K: _K_sub,
                    i: _i_sub,
                    k: _k_sub,
                    a: _a_sub
                },
                replacements=replacements)

            # need to connect the distributed version back to the
            # original self, via a shallow_simplification() of
            # each of the ScalarMult terms resulting in the distribution
            for _i in range(len(distribution.rhs.operands.entries)):
                distribution = (distribution.inner_expr().rhs.operands[_i].
                                shallow_simplify())

            eq.update(distribution.derive_reversed())

        # Case (2) VecAdd(TensorProd, TensorProd, ..., TensorProd)
        elif all(isinstance(op, TensorProd) for op in self.operands):
            # if hasattr(the_factor, 'operands'):
            #     print("the_factor has operands: {}".format(the_factor.operands))
            #     the_factor_tuple = the_factor.operands.entries
            # else:
            #     print("the_factor does not have operands: {}".format(the_factor))
            #     the_factor_tuple = (the_factor,)
            if isinstance(the_factor, TensorProd):
                the_factor_tuple = the_factor.operands.entries
            else:
                the_factor_tuple = (the_factor, )
            # Setting the default_field here because the field
            # used manually in the association step somehow gets lost
            VecSpaces.default_field = field
            # look for the_factor in each TensorProd appearing in
            # the VecAdd operands, looking at the left vs. right
            # sides depending on the 'pull' direction specified
            _b = []  # to hold factors left behind
            for _i in range(expr.terms.num_entries()):
                # Notice we're not ready to deal with ExprRange
                # versions of Add operands here!
                # We are also implicitly assuming that each TensorProd
                # has at least two operands
                term = expr.terms[_i]
                if hasattr(term, 'operands'):
                    term_tuple = term.operands.entries
                else:
                    term_tuple = (term, )
                if pull == 'left':
                    # look for factor at left-most-side
                    if the_factor_tuple != term_tuple[0:len(the_factor_tuple)]:
                        raise ValueError(
                            "VecAdd.factorization() expecting the_factor "
                            "{0} to appear at the leftmost side of each "
                            "addend, but {0} does not appear at the "
                            "leftmost side of the addend {1}.".format(
                                the_factor, term))
                    else:
                        # we're OK, so save away the remainder of
                        # factors from the rhs of the term, and group
                        # any multi-term remainder on the right
                        if len(term_tuple[len(the_factor_tuple):]) == 1:
                            _b.append(term_tuple[-1])
                        else:
                            _b.append(
                                TensorProd(
                                    *term_tuple[len(the_factor_tuple):]))
                            # then create an associated version of the
                            # expr to match the eventual thm instantiation
                            # (the analogous association is done in
                            # the 'right' case below)
                            expr = eq.update(
                                expr.inner_expr().operands[_i].association(
                                    len(the_factor_tuple),
                                    len(term_tuple) - len(the_factor_tuple),
                                    preserve_all=True))
                        # perhaps we actually don't need the assoc step?
                        # if len(the_factor_tuple) != 1:
                        #     expr = eq.update(expr.inner_expr().operands[_i].
                        #             association(0, len(the_factor_tuple),
                        #                     preserve_all=True))

                elif pull == 'right':
                    # look for factor at right-most-side
                    if the_factor_tuple != term_tuple[-(
                            len(the_factor_tuple)):]:
                        raise ValueError(
                            "VecAdd.factorization() expecting the_factor "
                            "{0} to appear at the rightmost side of each "
                            "addend, but {0} does not appear at the "
                            "rightmost side of the addend {1}.".format(
                                the_factor, term))
                    else:
                        # we're OK, so save away the remainder of
                        # factors from the lhs of the term, and group
                        # any multi-term remainder on the left
                        if len(term_tuple[0:-(len(the_factor_tuple))]) == 1:
                            _b.append(term_tuple[0])
                        else:
                            _b.append(
                                TensorProd(
                                    *term_tuple[0:-(len(the_factor_tuple))]))
                            # then create an associated version of the
                            # expr to match the eventual thm instantiation
                            expr = eq.update(
                                expr.inner_expr().operands[_i].association(
                                    0,
                                    len(term_tuple) - len(the_factor_tuple),
                                    preserve_all=True))
                        # perhaps we actually don't need the assoc step?
                        # if len(the_factor_tuple) != 1:
                        #     expr = eq.update(expr.inner_expr().operands[_i].
                        #             association(
                        #                 len(term_tuple)-len(the_factor_tuple),
                        #                 len(the_factor_tuple),
                        #                 preserve_all=True))

                else:
                    raise ValueError(
                        "VecAdd.factorization() requires 'pull' argument "
                        "to be specified as either 'left' or 'right'.")

            # now ready to instantiate the TensorProd/VecAdd
            # theorem: tensor_prod_distribution_over_add
            # and derive its reversed result
            from proveit.linear_algebra.tensors import (
                tensor_prod_distribution_over_add)
            from proveit import a, b, c, i, j, k, K, V
            from proveit.numbers import zero, one, num
            # useful to get ahead of time the num of operands
            # in the_factor and define the replacement
            if isinstance(the_factor, TensorProd):
                num_factor_entries = num(the_factor.operands.num_entries())
                factor_entries = the_factor.operands.entries
            else:
                num_factor_entries = one
                factor_entries = (the_factor, )
            # call deduce_in_vec_space() on the original self
            # instead of the current expr, otherwise we can run into
            # complications due to the associated sub-terms
            vec_space_membership = self.operands[0].deduce_in_vec_space(
                field=field)
            _V_sub = vec_space_membership.domain
            _K_sub = VecSpaces.known_field(_V_sub)
            if pull == 'left':
                # num of operands in the_factor, pulled to the left
                _i_sub = num_factor_entries
                # num of operands in right factor
                _k_sub = zero
                # the actual factor operands
                _a_sub = factor_entries
                # the other side is empty
                _c_sub = ()
            elif pull == 'right':
                # left side is empty
                _i_sub = zero
                # right side has the factor
                _k_sub = num_factor_entries
                # left side is empty
                _a_sub = ()
                # right side has the factor
                _c_sub = factor_entries
            _j_sub = num(len(_b))
            _b_sub = ExprTuple(*_b)
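            # For example (illustrative only): factoring u out of
            # u⊗x + u⊗y with pull='left' uses a = (u,), b = (x, y),
            # c = (), i = 1, j = 2, k = 0 in the instantiation below.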

            impl = tensor_prod_distribution_over_add.instantiate(
                {
                    V: _V_sub,
                    K: _K_sub,
                    i: _i_sub,
                    j: _j_sub,
                    k: _k_sub,
                    a: _a_sub,
                    b: _b_sub,
                    c: _c_sub
                },
                preserve_all=True)

            conseq = impl.derive_consequent()

            eq.update(conseq.derive_reversed())

        else:
            raise NotImplementedError(
                "VecAdd.factorization() does not yet handle this case.")

        return eq.relation
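
A rough usage sketch for the factorization() method above (hypothetical: the
variables u, x, y and the choice of Complex as the field are illustrative, the
exact keyword arguments may differ, and the relevant vector-space memberships
would need to be known or assumed for the proof steps to go through):

from proveit import u, x, y
from proveit.numbers import Complex
from proveit.linear_algebra import VecAdd, TensorProd

# u⊗x + u⊗y, with u to be pulled out on the left of each addend
vec_sum = VecAdd(TensorProd(u, x), TensorProd(u, y))
# expected (under suitable assumptions): u⊗x + u⊗y = u⊗(x + y)
factored = vec_sum.factorization(u, pull='left', field=Complex)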
コード例 #23
0
    def deduce_in_vec_space(self, vec_space=None, *, field, **defaults_config):
        '''
        Prove that this linear combination of vectors is in a vector
        space.  The vector space may be specified or inferred via known
        memberships.  A field for the vector space must be specified.
        '''
        from proveit.linear_algebra import ScalarMult

        terms = self.terms
        if vec_space is None:
            vec_space = VecSpaces.common_known_vec_space(terms, field=field)
        field = VecSpaces.known_field(vec_space)
        all_scaled = all((isinstance(term, ScalarMult) or (
            isinstance(term, ExprRange) and isinstance(term.body, ScalarMult)))
                         for term in terms)
        if all_scaled:
            # Use a linear combination theorem since everything
            # is scaled.
            from proveit.linear_algebra.scalar_multiplication import (
                binary_lin_comb_closure, lin_comb_closure)
            if terms.is_double():
                # Special linear combination binary case
                _a, _b = terms[0].scalar, terms[1].scalar
                _x, _y = terms[0].scaled, terms[1].scaled
                return binary_lin_comb_closure.instantiate({
                    K: field,
                    V: vec_space,
                    a: _a,
                    b: _b,
                    x: _x,
                    y: _y
                })
            else:
                # General linear combination case
                _a = []
                _x = []
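                # gather the scalars and scaled vectors from each term,
                # preserving ExprRange structure where present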
                for term in terms:
                    if isinstance(term, ExprRange):
                        _a.append(
                            ExprRange(term.parameter, term.body.scalar,
                                      term.true_start_index,
                                      term.true_end_index))
                        _x.append(
                            ExprRange(term.parameter, term.body.scaled,
                                      term.true_start_index,
                                      term.true_end_index))
                    else:
                        _a.append(term.scalar)
                        _x.append(term.scaled)
                _n = terms.num_elements()
                return lin_comb_closure.instantiate({
                    n: _n,
                    K: field,
                    V: vec_space,
                    a: _a,
                    x: _x
                })
        else:
            # Use a vector addition closure theorem.
            from . import binary_closure, closure
            if terms.is_double():
                # Special binary case
                return binary_closure.instantiate({
                    K: field,
                    V: vec_space,
                    x: terms[0],
                    y: terms[1]
                })
            else:
                # General case
                _n = terms.num_elements()
                return closure.instantiate({
                    n: _n,
                    K: field,
                    V: vec_space,
                    x: terms
                })
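
A rough usage sketch for deduce_in_vec_space() above (hypothetical: a, b, x, y
and the Complex field are illustrative, and the terms would already need known
memberships in a common vector space for the closure theorems to apply):

from proveit import a, b, x, y
from proveit.numbers import Complex
from proveit.linear_algebra import VecAdd, ScalarMult

# a·x + b·y: every term is a ScalarMult, so the binary linear
# combination closure theorem applies.
lin_comb = VecAdd(ScalarMult(a, x), ScalarMult(b, y))
membership = lin_comb.deduce_in_vec_space(field=Complex)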