def double_scaling_reduction(self, **defaults_config):
    from . import doubly_scaled_as_singly_scaled
    if not isinstance(self.scaled, ScalarMult):
        raise ValueError("'double_scaling_reduction' is only applicable "
                         "for a doubly nested ScalarMult")
    # Reduce doubly-nested ScalarMult
    _x = self.scaled.scaled
    # _V = VecSpaces.known_vec_space(_x)
    # the following is a little klunky, but trying to avoid the
    # use of a default field=Real if we're actually dealing with
    # complex scalars somewhere in the vector
    from proveit import free_vars
    if any(InSet(elem, Complex).proven() for elem in free_vars(self)):
        _V = VecSpaces.known_vec_space(self, field=Complex)
    else:
        _V = VecSpaces.known_vec_space(self)
    _K = VecSpaces.known_field(_V)
    _alpha = self.scalar
    _beta = self.scaled.scalar
    return doubly_scaled_as_singly_scaled.instantiate(
        {K: _K, V: _V, x: _x, alpha: _alpha, beta: _beta})
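# Hedged usage sketch (not from the source; illustrative names only): assuming
# 'alpha' and 'beta' are scalars of a known field and 'v' is known to belong
# to a vector space over that field, double_scaling_reduction collapses the
# nested scaling:
#
#     ScalarMult(alpha, ScalarMult(beta, v)).double_scaling_reduction()
#     # expected judgment: |- alpha*(beta*v) = (alpha*beta)*v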
def shallow_simplification(self, *, must_evaluate=False,
                           **defaults_config):
    '''
    Returns a proven simplification equation for this TensorProd
    expression assuming the operands have been simplified.
    Currently deals only with:
    (1) simplifying a TensorProd(x) (i.e. a TensorProd with a
        single operand x) to x itself.  For example,
        TensorProd(x) = x.
    (2) Ungrouping nested tensor products.
    (3) Factoring out scalars.
    '''
    if self.operands.is_single():
        from . import unary_tensor_prod_def
        _V = VecSpaces.known_vec_space(self.operand)
        _K = VecSpaces.known_field(_V)
        return unary_tensor_prod_def.instantiate(
            {K: _K, V: _V, A: self.operands[0]}, preserve_all=True)

    # for convenience while updating our equation:
    expr = self
    eq = TransRelUpdater(expr)

    if TensorProd._simplification_directives_.ungroup:
        # ungroup the expression (disassociate nested tensor products).
        _n = 0
        length = expr.operands.num_entries() - 1
        # loop through all operands
        while _n < length:
            operand = expr.operands[_n]
            if isinstance(operand, TensorProd):
                # if it is grouped, ungroup it
                expr = eq.update(expr.disassociation(
                    _n, preserve_all=True))
            length = expr.operands.num_entries()
            _n += 1

    if TensorProd._simplification_directives_.factor_scalars:
        # Next, pull out scalar factors
        try:
            VecSpaces.known_vec_space(self)
        except ValueError:
            raise UnsatisfiedPrerequisites(
                "No known vector space for %s" % self)
        for _k, operand in enumerate(expr.operands):
            if isinstance(operand, ScalarMult):
                # Just pull out the first one we see and let
                # recursive simplifications take care of any more.
                # To make sure this happens we turn auto_simplify
                # on, and those simplifications should all be fair
                # game as part of shallow_simplification.
                expr = eq.update(expr.scalar_factorization(
                    _k, auto_simplify=True))
                break

    # Future processing possible here.
    return eq.relation
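# Hedged usage sketch (illustrative names only): with ungrouping and scalar
# factoring enabled in the simplification directives, a nested, scaled tensor
# product is flattened and its scalar pulled out, e.g.
#
#     TensorProd(x, TensorProd(y, ScalarMult(c, z))).shallow_simplification()
#     # expected judgment (roughly): |- x⊗(y⊗(c·z)) = c·(x⊗y⊗z)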
def distribution(self, idx, *, field=None, **defaults_config):
    '''
    Given a TensorProd operand at the (0-based) index location
    'idx' that is a vector sum or summation, prove the distribution
    over that TensorProd factor and return an equality to the
    original TensorProd.  For example, we could take the TensorProd
        tens_prod = TensorProd(a, b+c, d)
    and call tens_prod.distribution(1) to obtain:
        |- TensorProd(a, b+c, d) =
           TensorProd(a, b, d) + TensorProd(a, c, d)
    '''
    from . import (tensor_prod_distribution_over_add,
                   tensor_prod_distribution_over_summation)
    _V = VecSpaces.known_vec_space(self, field=field)
    _K = VecSpaces.known_field(_V)
    sum_factor = self.operands[idx]
    _a = self.operands[:idx]
    _c = self.operands[idx+1:]
    _i = _a.num_elements()
    _k = _c.num_elements()
    if isinstance(sum_factor, VecAdd):
        _b = sum_factor.operands
        _j = _b.num_elements()
        # use preserve_all=True in the following instantiation
        # because the instantiation is an intermediate step;
        # otherwise auto_simplification can over-do things
        impl = tensor_prod_distribution_over_add.instantiate(
            {K: _K, i: _i, j: _j, k: _k, V: _V, a: _a, b: _b, c: _c},
            preserve_all=True)
        return impl.derive_consequent().with_wrapping_at()
    elif isinstance(sum_factor, VecSum):
        _b = sum_factor.indices
        _j = _b.num_elements()
        _f = Lambda(sum_factor.indices, sum_factor.summand)
        _Q = Lambda(sum_factor.indices, sum_factor.condition)
        # use preserve_all=True in the following instantiation
        # because the instantiation is an intermediate step;
        # otherwise auto_simplification can over-do things
        impl = tensor_prod_distribution_over_summation.instantiate(
            {K: _K, f: _f, Q: _Q, i: _i, j: _j, k: _k,
             V: _V, a: _a, b: _b, c: _c},
            preserve_all=True)
        return impl.derive_consequent().with_wrapping_at()
    else:
        raise ValueError(
            "Don't know how to distribute tensor product over " +
            str(sum_factor.__class__) + " factor")
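# Hedged usage sketch for the summation case (hypothetical names; assumes the
# relevant vector-space memberships are known or assumed, and that 'vec_sum'
# is a VecSum of vectors v_i over i in some domain D):
#
#     TensorProd(a, vec_sum, d).distribution(1)
#     # expected judgment (roughly):
#     # |- a ⊗ (Σ_{i in D} v_i) ⊗ d = Σ_{i in D} (a ⊗ v_i ⊗ d)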
def insert_vec_on_both_sides_of_equals(tensor_equality, idx, vec,
                                       rhs_idx=None, *, field=None,
                                       **defaults_config):
    '''
    From an equality with tensor products of vectors on both sides,
    derive a similar equality but with a vector operand inserted at
    the particular given zero-based index (idx).
    A different index may be specified for the right side than for
    the left side by setting rhs_idx (i.e., if entries don't line
    up due to differences of ExprRange entries), but the default is
    to use the same index on both sides.
    '''
    from . import insert_vec_on_both_sides_of_equality
    # First check various characteristics of the tensor_equality
    tensor_equality = TensorProd._check_tensor_equality(
        tensor_equality, allow_unary=True)
    if idx < 0:
        # use wrap-around indexing
        idx = tensor_equality.lhs.operands.num_entries() + idx
    if rhs_idx is None:
        rhs_idx = idx  # use the same index on both sides by default
    _a = tensor_equality.lhs.operands[:idx]
    _b = vec
    _c = tensor_equality.lhs.operands[idx:]
    _d = tensor_equality.rhs.operands[:rhs_idx]
    _e = tensor_equality.rhs.operands[rhs_idx:]
    _i = _a.num_elements()
    _k = _c.num_elements()
    vec_space = VecSpaces.known_vec_space(tensor_equality.lhs,
                                          field=field)
    _K = VecSpaces.known_field(vec_space)
    _U = VecSpaces.known_vec_spaces(_a, field=_K)
    _V = VecSpaces.known_vec_space(_b, field=_K)
    _W = VecSpaces.known_vec_spaces(_c, field=_K)
    impl = insert_vec_on_both_sides_of_equality.instantiate(
        {K: _K, i: _i, k: _k, U: _U, V: _V, W: _W,
         a: _a, b: _b, c: _c, d: _d, e: _e})
    return impl.derive_consequent().with_mimicked_style(tensor_equality)
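# Hedged usage sketch (hypothetical judgment 'tensor_eq' of the form
# |- x⊗y = u⊗v, with all factors and the inserted vector 'w' known to be in
# vector spaces over a common field):
#
#     TensorProd.insert_vec_on_both_sides_of_equals(tensor_eq, 1, w)
#     # expected judgment: |- x⊗w⊗y = u⊗w⊗v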
def compute_norm(self, **defaults_config):
    '''
    Proves ‖a v‖ = |a| ‖v‖.
    '''
    from proveit.linear_algebra.inner_products import scaled_norm
    vec_space = VecSpaces.known_vec_space(self.scaled)
    field = VecSpaces.known_field(vec_space)
    return scaled_norm.instantiate(
        {K: field, H: vec_space, alpha: self.scalar, x: self.scaled})
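# Hedged usage sketch (assuming 'v' is known to be in an inner-product space
# over a field containing the scalar 'a'):
#
#     ScalarMult(a, v).compute_norm()
#     # expected judgment: |- ‖a·v‖ = |a|·‖v‖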
def deduce_in_vec_space(self, vec_space=None, *, field,
                        **defaults_config):
    '''
    Prove that this Qmult is in a vector space (e.g., if it is
    a ket).
    '''
    from proveit.physics.quantum import QmultCodomain
    # In the process of proving that 'self' is in QmultCodomain,
    # it will prove it is a vector in a Hilbert space if
    # appropriate.
    QmultCodomain.membership_object(self).conclude()
    if vec_space is not None:
        return InSet(self, vec_space).prove()
    return InSet(self, VecSpaces.known_vec_space(
        self, field=field)).prove()
def disassociation(self, idx, *, field=None, **defaults_config):
    '''
    Given vector operands, or all CartExp operands, deduce that
    this expression is equal to a form in which the operand at
    index idx is no longer grouped together.
    For example, calling
        (a ⊗ b ⊗ ... (l ⊗ ... ⊗ m) ... ⊗ y ⊗ z).disassociation(idx),
    with idx the (0-based) index of the grouped factor
    (l ⊗ ... ⊗ m), would return
        |- (a ⊗ b ⊗ ... (l ⊗ ... ⊗ m) ... ⊗ y ⊗ z) =
           (a ⊗ b ⊗ ... ⊗ l ⊗ ... ⊗ m ⊗ ... ⊗ y ⊗ z)
    Or calling (R3 ⊗ (R3 ⊗ R3)).disassociation(1) would return
        |- (R3 ⊗ (R3 ⊗ R3)) = (R3 ⊗ R3 ⊗ R3)
    For this to work in the vectors case, the vector operands must
    be known to be in vector spaces of a common field.  If the
    field is not specified, then VecSpaces.default_field is used.
    For this to work in the case of CartExp operands, all operands
    must be (recursively) CartExps and each must be known to be
    a vector space.
    '''
    if not TensorProd.all_ops_are_cart_exp(self):
        from . import tensor_prod_disassociation
        _V = VecSpaces.known_vec_space(self, field=field)
        _K = VecSpaces.known_field(_V)
        eq = apply_disassociation_thm(
            self, idx, tensor_prod_disassociation,
            repl_map_extras={K: _K, V: _V}).derive_consequent()
        return eq.with_wrapping_at()
    else:
        from . import tensor_prod_vec_space_disassociation
        if field is None:
            _K = VecSpaces.known_field(self.operands[0])
        else:
            _K = field
        eq = apply_disassociation_thm(
            self, idx, tensor_prod_vec_space_disassociation,
            repl_map_extras={K: _K})
        return eq.with_wrapping_at()
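# Hedged usage sketch (illustrative names; vector-space memberships assumed):
#
#     TensorProd(a, TensorProd(b, c), d).disassociation(1)
#     # expected judgment: |- a⊗(b⊗c)⊗d = a⊗b⊗c⊗d
#
#     TensorProd(R3, TensorProd(R3, R3)).disassociation(1)   # CartExp case
#     # expected judgment: |- R3⊗(R3⊗R3) = R3⊗R3⊗R3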
def scalar_factorization(self, idx=None, *, field=None,
                         **defaults_config):
    '''
    Prove the factorization of a scalar from one of the tensor
    product operands and return the original tensor product equal
    to the factored version.  If idx is provided, it will specify
    the (0-based) index location of the ScalarMult operand with the
    multiplier to factor out.  If no idx is provided, the first
    ScalarMult operand will be targeted.
    For example,
        TensorProd(a, ScalarMult(c, b), d).scalar_factorization(1)
    returns
        |- TensorProd(a, ScalarMult(c, b), d) = c TensorProd(a, b, d)
    As a prerequisite, the operands must be known to be vectors in
    vector spaces over a common field which contains the scalar
    multiplier being factored.  If the field is not specified, then
    VecSpaces.default_field is used.
    '''
    from . import factor_scalar_from_tensor_prod
    if idx is None:
        for _k, operand in enumerate(self.operands):
            if isinstance(operand, ScalarMult):
                idx = _k
                break
    elif idx < 0:
        # use wrap-around indexing
        idx = self.operands.num_entries() + idx
    if not isinstance(self.operands[idx], ScalarMult):
        raise TypeError("Expected the operand at index %s to be a "
                        "ScalarMult" % idx)
    _V = VecSpaces.known_vec_space(self, field=field)
    _K = VecSpaces.known_field(_V)
    _alpha = self.operands[idx].scalar
    _a = self.operands[:idx]
    _b = self.operands[idx].scaled
    _c = self.operands[idx+1:]
    _i = _a.num_elements()
    _k = _c.num_elements()
    impl = factor_scalar_from_tensor_prod.instantiate(
        {K: _K, alpha: _alpha, i: _i, k: _k, V: _V,
         a: _a, b: _b, c: _c})
    return impl.derive_consequent().with_wrapping_at()
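# Hedged usage sketch (illustrative names; memberships over a common field
# assumed), matching the docstring example:
#
#     TensorProd(a, ScalarMult(c, b), d).scalar_factorization(1)
#     # expected judgment: |- a⊗(c·b)⊗d = c·(a⊗b⊗d)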
def deduce_in_vec_space(self, vec_space=None, *, field,
                        **defaults_config):
    '''
    Prove that this scaled vector is in a vector space.  The vector
    space may be specified or inferred via known memberships.
    A field for the vector space must be specified.
    '''
    from . import scalar_mult_closure
    if vec_space is None:
        # No vector space given, so we'll have to look for
        # a known membership of 'scaled' in a vector space.
        # This may be arbitrarily chosen.
        vec_space = VecSpaces.known_vec_space(self.scaled,
                                              field=field)
        field = VecSpaces.known_field(vec_space)
    return scalar_mult_closure.instantiate(
        {K: field, V: vec_space, a: self.scalar, x: self.scaled})
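# Hedged usage sketch (assuming InSet(v, V) is known for some vector space V
# over the field Real, and 'a' is a scalar of that field):
#
#     ScalarMult(a, v).deduce_in_vec_space(field=Real)
#     # expected judgment: |- a·v ∈ V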
def compute_norm(self, **defaults_config):
    '''
    Proves ‖x + y‖ = sqrt(‖x‖^2 + ‖y‖^2) if the inner product
    of x and y is zero.
    '''
    from proveit.linear_algebra import InnerProd, ScalarMult
    from . import norm_of_sum_of_orthogonal_pair
    if self.operands.is_double():
        _a, _b = self.operands
        _x = _a.scaled if isinstance(_a, ScalarMult) else _a
        _y = _b.scaled if isinstance(_b, ScalarMult) else _b
        if Equals(InnerProd(_x, _y), zero).proven():
            vec_space = VecSpaces.known_vec_space(_a)
            field = VecSpaces.known_field(vec_space)
            return norm_of_sum_of_orthogonal_pair.instantiate(
                {K: field, V: vec_space, a: _a, b: _b})
    raise NotImplementedError(
        "VecAdd.compute_norm is only implemented for an "
        "orthogonal pair of vectors")
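# Hedged usage sketch (assuming Equals(InnerProd(x, y), zero) has been proven
# and x, y are known to be in a common inner-product space):
#
#     VecAdd(x, y).compute_norm()
#     # expected judgment: |- ‖x + y‖ = sqrt(‖x‖^2 + ‖y‖^2)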
def deduce_in_vec_space(self, vec_space=None, *, field=None,
                        **defaults_config):
    '''
    Prove that this vector summation is in a vector space.
    '''
    from . import summation_closure
    if vec_space is None:
        with defaults.temporary() as tmp_defaults:
            tmp_defaults.assumptions = (defaults.assumptions +
                                        self.conditions.entries)
            vec_space = VecSpaces.known_vec_space(self.summand,
                                                  field=field)
    _V = vec_space
    _K = VecSpaces.known_field(_V)
    _b = self.indices
    _j = _b.num_elements()
    _f = Lambda(self.indices, self.summand)
    if not hasattr(self, 'condition'):
        # A single 'condition' attribute is needed to build the
        # Lambda condition map below.
        raise ValueError(
            "'deduce_in_vec_space' requires %s to have a single "
            "'condition' attribute" % self)
    _Q = Lambda(self.indices, self.condition)
    return summation_closure.instantiate(
        {j: _j, K: _K, f: _f, Q: _Q, V: _V, b: _b}).derive_consequent()
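# Hedged usage sketch (hypothetical summation 'vec_sum' whose summand is known
# to be in a vector space V under the summation's conditions):
#
#     vec_sum.deduce_in_vec_space(field=Complex)
#     # expected judgment: |- vec_sum ∈ V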
def conclude(self, **defaults_config):
    '''
    Called on self = [elem in (A x B x ...)] (where x denotes a
    tensor product and elem = a x b x ...) and knowing or assuming
    that (a in A) and (b in B) and ..., derive and return self.
    '''
    if isinstance(self.element, TensorProd):
        from . import tensor_prod_is_in_tensor_prod_space
        from proveit.linear_algebra import VecSpaces
        # we will need the domain acknowledged as a VecSpace
        # so we can later get its underlying field
        self.domain.deduce_as_vec_space()
        _a_sub = self.element.operands
        _i_sub = _a_sub.num_elements()
        _K_sub = VecSpaces.known_field(self.domain)
        vec_spaces = self.domain.operands
        return tensor_prod_is_in_tensor_prod_space.instantiate(
            {a: _a_sub, i: _i_sub, K: _K_sub, V: vec_spaces})
    if isinstance(self.element, ScalarMult):
        from proveit.linear_algebra.scalar_multiplication import (
            scalar_mult_closure)
        from proveit.linear_algebra import VecSpaces
        self.domain.deduce_as_vec_space()
        _V_sub = VecSpaces.known_vec_space(self.element.scaled,
                                           field=None)
        _K_sub = VecSpaces.known_field(_V_sub)
        _a_sub = self.element.scalar
        _x_sub = self.element.scaled
        return scalar_mult_closure.instantiate(
            {V: _V_sub, K: _K_sub, a: _a_sub, x: _x_sub})
    if isinstance(self.element, VecSum):
        from proveit.linear_algebra.addition import summation_closure
        from proveit.linear_algebra import VecSpaces
        self.domain.deduce_as_vec_space()
        # might want to change the following to use
        # vec_space_membership = self.element.summand.deduce_in_vec_space()
        # and then _V_sub = vec_space_membership.domain
        _V_sub = VecSpaces.known_vec_space(self.element.summand)
        _K_sub = VecSpaces.known_field(_V_sub)
        _b_sub = self.element.indices
        _j_sub = _b_sub.num_elements()
        _f_sub = Lambda(self.element.indices, self.element.summand)
        _Q_sub = Lambda(self.element.indices, self.element.condition)
        imp = summation_closure.instantiate(
            {V: _V_sub, K: _K_sub, b: _b_sub, j: _j_sub,
             f: _f_sub, Q: _Q_sub})
        return imp.derive_consequent()
    raise ProofFailure(
        self, defaults.assumptions,
        "Element {0} is neither a TensorProd, a ScalarMult, "
        "nor a VecSum.".format(self.element))
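# Hedged usage sketch (illustrative names; assuming InSet(a, A) and
# InSet(b, B) are known or assumed for vector spaces A and B over a common
# field):
#
#     InSet(TensorProd(a, b), TensorProd(A, B)).prove()
#     # conclude() is invoked via the membership machinery to derive
#     # the tensor-product membership from the factor memberships.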