Example #1
 def __init__(self, coreInfo, subExpressions=tuple(), styles=dict(), requirements=tuple()):
     '''
     Initialize an expression with the given coreInfo (information relevant at the core Expression-type
     level) which should be a list (or tuple) of strings, and a list (or tuple) of subExpressions.
     "styles" is a dictionary used to indicate how the Expression should be formatted
     when there are different possibilities (e.g. division with '/' or as a fraction).  The meaning
     of the expression is independent of its styles signature.
     The "requirements" are expressions that must be proven to be true in order for the Expression
     to make sense.
     '''
     for coreInfoElem in coreInfo:
         if not isinstance(coreInfoElem, str):
             raise TypeError('Expecting coreInfo elements to be of string type')
     for subExpression in subExpressions:
         if not isinstance(subExpression, Expression):
             raise TypeError('Expecting subExpression elements to be of Expression type')
     # The meaning and style representations generated below are based upon the
     # coreInfo and the unique ids of the sub-Expressions.
     self._coreInfo, self._subExpressions = tuple(coreInfo), tuple(subExpressions)
     self._styles = dict(styles) # formatting style options that don't affect the meaning of the expression
     # meaning representations and unique ids are independent of style
     self._meaning_rep = self._generate_unique_rep(lambda expr : hex(expr._meaning_id))
     self._meaning_id = makeUniqueId(self._meaning_rep)
     # style representations and style ids depend upon the style
     self._style_rep = self._generate_unique_rep(lambda expr : hex(expr._style_id), includeStyle=True)
     self._style_id = makeUniqueId(self._style_rep)
     for subExpression in subExpressions: # update Expression.parent_expr_map
         addParent(subExpression, self)
     # combine requirements from all sub-expressions
     requirements = sum([tuple(subExpression.requirements) for subExpression in subExpressions], tuple()) + requirements
     # Expression requirements are essentially assumptions that need to be proven for the expression to
     # be valid.  Calling "checkAssumptions" will remove repeats and generate proof by assumption for each
     # (which may not be necessary, but does not hurt).   
     self.requirements = defaults.checkedAssumptions(requirements)
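
The split between the meaning representation and the style representation computed above is the key invariant: two Expressions that differ only in formatting share a meaning id but get distinct style ids. A minimal stand-alone sketch of that idea (plain Python, not Prove-It code: makeUniqueId is approximated with a hash, and the 'Div' core info and 'division' style key are made up for illustration):

    import hashlib

    def make_unique_id(rep):
        # toy analogue of makeUniqueId: hash a canonical string representation
        return hashlib.sha1(rep.encode()).hexdigest()

    def meaning_rep(core_info, sub_ids):
        return ';'.join(core_info) + '|' + ','.join(sub_ids)

    def style_rep(core_info, sub_ids, styles):
        # the style representation extends the meaning representation with the style options
        return meaning_rep(core_info, sub_ids) + '|' + ';'.join(
            '%s:%s' % item for item in sorted(styles.items()))

    core, subs = ('Operation', 'Div'), ('0xa1', '0xb2')
    inline = make_unique_id(style_rep(core, subs, {'division': 'inline'}))
    fraction = make_unique_id(style_rep(core, subs, {'division': 'fraction'}))
    assert inline != fraction  # different styles give different style ids
    assert make_unique_id(meaning_rep(core, subs)) == make_unique_id(meaning_rep(core, subs))  # meaning id ignores style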
Example #2
 def substituted(self, exprMap, relabelMap=None, reservedVars=None, assumptions=USE_DEFAULTS, requirements=None):
     '''
     Return this expression with its variables substituted 
     according to exprMap and/or relabeled according to relabelMap.
     The Lambda parameters have their own scope within the Lambda 
     body and do not get substituted.  They may be relabeled, however. 
     Substitutions within the Lambda body are restricted to 
     exclude the Lambda parameters themselves (these Variables 
     are reserved), consistent with any relabeling.
     '''
     from proveit.logic import Forall
     
     self._checkRelabelMap(relabelMap)
     if len(exprMap)>0 and (self in exprMap):
         # the full expression is to be substituted
         return exprMap[self]._restrictionChecked(reservedVars)        
     if relabelMap is None: relabelMap = dict()
     if requirements is None: requirements = [] # requirements won't be passed back in this case
     assumptions = defaults.checkedAssumptions(assumptions)
     
     new_params, inner_expr_map, inner_assumptions, inner_reservations = self._innerScopeSub(exprMap, relabelMap, reservedVars, assumptions, requirements)
     
     # conditions with substitutions:
     condition_requirements = []
     condition_assumptions = inner_assumptions
     subbedConditions = self.conditions.substituted(inner_expr_map, relabelMap, inner_reservations, condition_assumptions, condition_requirements)
     # The lambda body with the substitutions.  Add the conditions, with substitutions, as assumptions
     # since they must be satisfied for the mapping to be well-defined.
     body_requirements = []
     body_assumptions = list(inner_assumptions)+list(subbedConditions)
     subbedBody = self.body.substituted(inner_expr_map, relabelMap, inner_reservations, body_assumptions, body_requirements)
     
     for sub_requirements, requirements_assumptions in zip((condition_requirements, body_requirements), ([], subbedConditions)):
         for requirement in sub_requirements:
             if requirement.freeVars().isdisjoint(new_params):
                 requirements.append(requirement)
             else:
                 # When the requirement involves any of the lambda parameters, we must universally quantify
                 # over those parameters, with appropriate conditions.  Appropriate conditions are the
                 # applicable assumptions that involve the lambda parameters.  This excludes the 'inner_assumptions'
                 # because they cannot involve lambda parameters (those were excluded).
                 requirement_params = requirement.freeVars().intersection(new_params)
                 requirement_conditions = [condition for condition in requirements_assumptions if not new_params.isdisjoint(condition.freeVars())]
                 requirement = Forall(requirement_params, requirement, conditions=requirement_conditions)
                 requirements.append(requirement)
     
     try:
         newLambda = Lambda(new_params, subbedBody, subbedConditions)
     except TypeError as e:
         raise ImproperSubstitution(e.args[0])
     except ValueError as e:
         raise ImproperSubstitution(e.args[0])            
     return newLambda
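
The scoping rule described in the docstring (the exprMap does not reach the Lambda parameters, which may only be renamed through relabelMap) can be illustrated with a small stand-alone sketch; this is plain Python over tuple-encoded terms, not the Prove-It classes:

    def subst(term, expr_map, relabel_map):
        kind = term[0]
        if kind == 'var':
            if term in expr_map:
                return expr_map[term]
            return relabel_map.get(term, term)
        if kind == 'lambda':
            _, param, body = term
            # the bound parameter is excluded from substitution within the body
            inner_map = {k: v for k, v in expr_map.items() if k != param}
            new_param = relabel_map.get(param, param)
            return ('lambda', new_param, subst(body, inner_map, relabel_map))
        return (kind,) + tuple(subst(t, expr_map, relabel_map) for t in term[1:])

    x, y, c, k = ('var', 'x'), ('var', 'y'), ('var', 'c'), ('var', 'k')
    lam = ('lambda', x, ('add', x, c))
    # 'x' is bound, so only 'c' is substituted; relabeling renames the parameter everywhere.
    assert subst(lam, {x: y, c: k}, {}) == ('lambda', x, ('add', x, k))
    assert subst(lam, {}, {x: y}) == ('lambda', y, ('add', y, c))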
Example #3
    def __init__(self,
                 tensor,
                 shape=None,
                 styles=None,
                 assumptions=USE_DEFAULTS,
                 requirements=tuple()):
        '''
        Create an ExprTensor either with a simple, dense tensor (list of lists ... of lists) or
        with a dictionary mapping coordinates (as tuples of expressions that represent integers) 
        to expr elements or Blocks.
        Providing a 'shape' can extend the bounds of the tensor beyond
        the elements that are supplied.
        '''
        from .composite import _simplifiedCoord
        from proveit._core_ import KnownTruth
        from proveit.number import Less, Greater, zero, one, num, Add, Subtract

        assumptions = defaults.checkedAssumptions(assumptions)
        requirements = list(requirements)  # make a mutable copy; keep any caller-supplied requirements
        if not isinstance(tensor, dict):
            tensor = {
                loc: element
                for loc, element in ExprTensor._tensorDictFromIterables(
                    tensor, assumptions, requirements)
            }

        # Map direct compositions for the end-coordinate of Iter elements
        # to their simplified forms.
        self.endCoordSimplifications = dict()

        # generate the set of distinct coordinates for each dimension
        coord_sets = None  # simplified versions
        full_tensor = dict()
        ndims = None
        if shape is not None:
            shape = ExprTensor.locAsExprs(shape)
            ndims = len(shape)
        for loc, element in tensor.items():
            if isinstance(element, KnownTruth):
                element = element.expr  # extract the Expression from the KnownTruth
            loc = list(loc)  # make the location mutable so int coordinates can be replaced below
            ndims = len(loc)
            if coord_sets is None:
                coord_sets = [set() for _ in range(ndims)]
            elif len(coord_sets) != ndims:
                if shape is not None:
                    raise ValueError(
                        "length of 'shape' is inconsistent with number of dimensions for ExprTensor locations"
                    )
                else:
                    raise ValueError(
                        "inconsistent number of dimensions for locations of the ExprTensor"
                    )
            for axis, coord in enumerate(list(loc)):
                if isinstance(coord, int):
                    coord = num(
                        coord)  # convert from Python int to an Expression
                    loc[axis] = coord
                coord_sets[axis].add(coord)
                if isinstance(element, Iter):
                    # Add (end-start)+1 of the Iter to get to the end
                    # location of the entry along this axis.
                    orig_end_coord = Add(
                        coord,
                        Subtract(element.end_indices[axis],
                                 element.start_indices[axis]), one)
                    end_coord = _simplifiedCoord(orig_end_coord, assumptions,
                                                 requirements)
                    self.endCoordSimplifications[orig_end_coord] = end_coord
                    coord_sets[axis].add(end_coord)
            full_tensor[tuple(loc)] = element

        if ndims is None:
            raise ExprTensorError("Empty ExprTensor is not allowed")
        if ndims <= 1:
            raise ExprTensorError(
                "ExprTensor must be 2 or more dimensions (use an ExprList for something 1-dimensional)"
            )

        # In each dimension, coord_rel_indices will hold a dictionary
        # that maps each tensor location coordinate to its relative entry index.
        coord_rel_indices = []
        self.sortedCoordLists = []
        self.coordDiffRelationLists = []
        for axis in range(ndims):  # for each axis
            # KnownTruth sorting relation for the simplified coordinates used along this axis
            # (something with a form like a < b <= c = d <= e, that sorts the tensor location coordinates):
            coord_sorting_relation = Less.sort(coord_sets[axis],
                                               assumptions=assumptions)
            sorted_coords = list(coord_sorting_relation.operands)

            if shape is None:
                # Since nothing was explicitly specified, the shape is dictated by extending
                # one beyond the last coordinate entry.
                sorted_coords.append(Add(sorted_coords[-1], one))
            else:
                sorted_coords.append(
                    shape[axis]
                )  # append the coordinate for the explicitly specified shape
            if sorted_coords[0] != zero:
                sorted_coords.insert(
                    0, zero
                )  # make sure the first of the sorted coordinates is zero.

            self.sortedCoordLists.append(ExprList(sorted_coords))

            # Add in coordinate expressions that explicitly indicate the difference between coordinates.
            # These may be used in generating the latex form of the ExprTensor.
            diff_relations = []
            for c1, c2 in zip(sorted_coords[:-1], sorted_coords[1:]):
                diff = _simplifiedCoord(Subtract(c2, c1), assumptions,
                                        requirements)
                # get the relationship between the difference of successive coordinates and zero.
                diff_relation = Greater.sort([zero, diff],
                                             assumptions=assumptions)
                if isinstance(diff_relation, Greater):
                    if c2 == sorted_coords[-1] and shape is not None:
                        raise ExprTensorError(
                            "Coordinates extend beyond the specified shape in axis %d: %s after %s"
                            % (axis, str(coord_sorting_relation.operands[-1]),
                               str(shape[axis])))
                    assert tuple(diff_relation.operands) == (
                        diff, zero), 'Inconsistent Less.sort results'
                    # diff > 0, let's compare it with one now
                    diff_relation = Greater.sort([one, diff],
                                                 assumptions=assumptions)
                requirements.append(diff_relation)
                diff_relations.append(diff_relation)
            self.coordDiffRelationLists.append(ExprList(diff_relations))

            # map each coordinate expression to its index into the sorting_relation operands
            coord_rel_indices.append(
                {coord: k
                 for k, coord in enumerate(sorted_coords)})

        # convert from the full tensor with arbitrary expression coordinates to coordinates that are
        # mapped according to sorted relation enumerations.
        rel_index_tensor = dict()
        for loc, element in full_tensor.items():
            rel_index_loc = tuple(
                rel_index_map[coord]
                for coord, rel_index_map in zip(loc, coord_rel_indices))
            rel_index_tensor[rel_index_loc] = element

        sorted_keys = sorted(rel_index_tensor.keys())
        Expression.__init__(self, [
            'ExprTensor',
            str(ndims), ';'.join(str(key) for key in sorted_keys)
        ],
                            self.sortedCoordLists +
                            self.coordDiffRelationLists +
                            [rel_index_tensor[key] for key in sorted_keys],
                            styles=styles,
                            requirements=requirements)
        self.ndims = ndims
        self.relIndexTensor = rel_index_tensor

        # entryOrigins maps relative indices that contain tensor elements to
        # the relative indices of the origin for the corresponding entry.
        # Specifically, single-element entries map indices to themselves, but
        # multi-element Iter entries map each of the encompassed
        # relative index locations to the origin relative index location
        # where that Iter entry is stored.
        self.relEntryOrigins = self._makeEntryOrigins()

        # the last coordinates of the sorted coordinates along each axis define the shape:
        self.shape = ExprList(
            [sorted_coords[-1] for sorted_coords in self.sortedCoordLists])
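
Most of the bookkeeping above amounts to replacing each symbolic coordinate by its position in the per-axis sorted order, so entries can be stored and compared with plain integer indices. A stand-alone sketch of that mapping (plain Python; the coordinate strings stand in for sorted coordinate Expressions):

    coords_by_axis = [['0', 'i', 'i+1', 'n'], ['0', '1', 'k']]  # already sorted along each axis
    coord_rel_indices = [{coord: k for k, coord in enumerate(axis_coords)}
                         for axis_coords in coords_by_axis]
    loc = ('i', '1')  # a symbolic tensor location
    rel_index_loc = tuple(rel_index_map[coord]
                          for coord, rel_index_map in zip(loc, coord_rel_indices))
    assert rel_index_loc == (1, 1)  # 'i' is position 1 on axis 0; '1' is position 1 on axis 1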
Example #4
    def substituted(self,
                    exprMap,
                    relabelMap=None,
                    reservedVars=None,
                    assumptions=USE_DEFAULTS,
                    requirements=None):
        '''
        Returns this expression with the substitutions made 
        according to exprMap and/or relabeled according to relabelMap.
        Attempt to automatically expand the iteration if any Indexed 
        sub-expressions substitute their variable for a composite
        (list or tensor).  Indexed should index variables that represent
        composites, but substituting the composite is a signal that
        an outer iteration should be expanded.  An exception is
        raised if this fails.
        '''
        from proveit.logic import Equals
        from proveit.number import Less, LessEq, Subtract, Add, one
        from .composite import _simplifiedCoord
        from proveit._core_.expression.expr import _NoExpandedIteration

        if relabelMap is None: relabelMap = dict()
        assumptions = defaults.checkedAssumptions(assumptions)
        arg_sorting_assumptions = list(assumptions)

        new_requirements = []

        # Collect the iteration ranges from Indexed sub-Expressions
        # whose variable is being replaced with a Composite (list or tensor).
        # If there are not any, we won't expand the iteration at this point.
        # While we are at it, get all of the end points of the
        # ranges along each axis (as well as end points +/-1 that may be
        # needed if there are overlaps): 'special_points'.
        iter_ranges = set()
        iter_params = self.lambda_map.parameters
        special_points = [set() for _ in range(len(iter_params))]
        subbed_start = self.start_indices.substituted(exprMap, relabelMap,
                                                      reservedVars,
                                                      assumptions,
                                                      new_requirements)
        subbed_end = self.end_indices.substituted(exprMap, relabelMap,
                                                  reservedVars, assumptions,
                                                  new_requirements)
        try:
            for iter_range in self.lambda_map.body._expandingIterRanges(
                    iter_params, subbed_start, subbed_end, exprMap, relabelMap,
                    reservedVars, assumptions, new_requirements):
                iter_ranges.add(iter_range)
                for axis, (start, end) in enumerate(zip(*iter_range)):
                    special_points[axis].add(start)
                    special_points[axis].add(end)
                    # Preemptively include start-1 and end+1 in case they are required for splitting up overlapping ranges
                    # (we won't add simplification requirements until we find we actually need them).
                    # Not necessary in the 1D case.
                    # Add the coordinate simplification to the argument sorting assumptions -
                    # after all, this sorting does not go directly into the requirements.
                    start_minus_one = _simplifiedCoord(
                        Subtract(start, one),
                        assumptions=assumptions,
                        requirements=arg_sorting_assumptions)
                    end_plus_one = _simplifiedCoord(
                        Add(end, one),
                        assumptions=assumptions,
                        requirements=arg_sorting_assumptions)
                    special_points[axis].update(
                        {start_minus_one, end_plus_one})
                    # Add start-1<start and end<end+1 assumptions to ease argument sorting -
                    # after all, this sorting does not go directly into the requirements.
                    arg_sorting_assumptions.append(Less(
                        start_minus_one, start))
                    arg_sorting_assumptions.append(Less(end, end_plus_one))
                    arg_sorting_assumptions.append(
                        Equals(end, Subtract(end_plus_one, one)))
                    # Also add start<=end to ease the argument sorting requirement even though it
                    # may not strictly be true if an empty range is possible.  In such a case, we
                    # still want things sorted this way while we don't know if the range is empty or not
                    # and it does not go directly into the requirements.
                    arg_sorting_assumptions.append(LessEq(start, end))

            # There are Indexed sub-Expressions whose variable is
            # being replaced with a Composite, so let us
            # expand the iteration for all of the relevant
            # iteration ranges.
            # Sort the argument value ranges.

            arg_sorting_relations = []
            for axis in range(self.ndims):
                if len(special_points[axis]) == 0:
                    arg_sorting_relation = None
                else:
                    arg_sorting_relation = Less.sort(
                        special_points[axis],
                        assumptions=arg_sorting_assumptions)
                arg_sorting_relations.append(arg_sorting_relation)

            # Put the iteration ranges in terms of indices of the sorting relation operands
            # (relative indices w.r.t. the sorting relation order).
            rel_iter_ranges = set()
            for iter_range in iter_ranges:
                range_start, range_end = iter_range
                rel_range_start = tuple([
                    arg_sorting_relation.operands.index(arg)
                    for arg, arg_sorting_relation in zip(
                        range_start, arg_sorting_relations)
                ])
                rel_range_end = tuple([
                    arg_sorting_relation.operands.index(arg)
                    for arg, arg_sorting_relation in zip(
                        range_end, arg_sorting_relations)
                ])
                rel_iter_ranges.add((rel_range_start, rel_range_end))

            rel_iter_ranges = sorted(
                self._makeNonoverlappingRangeSet(rel_iter_ranges,
                                                 arg_sorting_relations,
                                                 assumptions,
                                                 new_requirements))

            # Generate the expanded list/tensor to replace the iterations.
            if self.ndims == 1: lst = []
            else: tensor = dict()
            for rel_iter_range in rel_iter_ranges:
                # get the starting location of this iteration range
                start_loc = tuple(
                    arg_sorting_relation.operands[idx]
                    for arg_sorting_relation, idx in zip(
                        arg_sorting_relations, rel_iter_range[0]))
                if rel_iter_range[0] == rel_iter_range[1]:
                    # single element entry (starting and ending location the same)
                    inner_expr_map = dict(exprMap)
                    inner_expr_map.update({
                        param: arg
                        for param, arg in zip(self.lambda_map.parameters,
                                              start_loc)
                    })
                    for param in self.lambda_map.parameters:
                        relabelMap.pop(param, None)
                    entry = self.lambda_map.body.substituted(
                        inner_expr_map, relabelMap, reservedVars, assumptions,
                        new_requirements)
                else:
                    # iterate over a sub-range
                    end_loc = tuple(
                        arg_sorting_relation.operands[idx]
                        for arg_sorting_relation, idx in zip(
                            arg_sorting_relations, rel_iter_range[1]))
                    # Shift the iteration parameter so that the iteration will have the same start-indices
                    # for this sub-range (like shifting a viewing window, moving the origin to the start of the sub-range).
                    # Include assumptions that the lambda_map parameters are in the shifted start_loc to end_loc range.
                    range_expr_map = dict(exprMap)
                    range_assumptions = list(assumptions)
                    for start_idx, param, range_start, range_end in zip(
                            self.start_indices, self.lambda_map.parameters,
                            start_loc, end_loc):
                        range_expr_map[param] = Add(
                            param, Subtract(range_start, start_idx))
                        range_assumptions += Less.sort((start_idx, param),
                                                       reorder=False,
                                                       assumptions=assumptions)
                        range_assumptions += Less.sort(
                            (param, Subtract(range_end, start_idx)),
                            reorder=False,
                            assumptions=assumptions)
                    range_lambda_body = self.lambda_map.body.substituted(
                        range_expr_map, relabelMap, reservedVars,
                        range_assumptions, new_requirements)
                    range_lambda_map = Lambda(self.lambda_map.parameters,
                                              range_lambda_body)
                    # Add the shifted sub-range iteration to the appropriate starting location.
                    end_indices = [
                        _simplifiedCoord(Subtract(range_end, start_idx),
                                         assumptions, new_requirements)
                        for start_idx, range_end in zip(
                            self.start_indices, end_loc)
                    ]
                    entry = Iter(range_lambda_map, self.start_indices,
                                 end_indices)
                if self.ndims == 1: lst.append(entry)
                else: tensor[start_loc] = entry

            if self.ndims == 1:
                subbed_self = compositeExpression(lst)
            else:
                subbed_self = compositeExpression(tensor)

        except _NoExpandedIteration:
            # No Indexed sub-Expressions whose variable is
            # replaced with a Composite, so let us not expand the
            # iteration.  Just do an ordinary substitution.
            subbed_map = self.lambda_map.substituted(exprMap, relabelMap,
                                                     reservedVars, assumptions,
                                                     new_requirements)
            subbed_self = Iter(subbed_map, subbed_start, subbed_end)

        for requirement in new_requirements:
            requirement._restrictionChecked(
                reservedVars
            )  # make sure requirements don't use reserved variable in a nested scope
        if requirements is not None:
            requirements += new_requirements  # append new requirements

        return subbed_self
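
The "viewing window" shift used for sub-range entries can be checked with ordinary integers: an iteration over a sub-range [a, b] is rewritten so that its parameter still begins at the original start index, with the offset a - start folded into the body. A stand-alone sketch (plain Python arithmetic, not Prove-It expressions):

    def shifted_subrange(f, start, a, b):
        # iterate j from start to (b - a + start), applying f at i = j + (a - start)
        offset = a - start
        return [f(j + offset) for j in range(start, b - a + start + 1)]

    f = lambda i: i * i
    assert shifted_subrange(f, 1, 3, 5) == [f(3), f(4), f(5)]  # covers exactly the sub-range 3..5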
Example #5
    def __init__(self,
                 coreInfo,
                 subExpressions=tuple(),
                 styles=None,
                 requirements=tuple()):
        '''
        Initialize an expression with the given coreInfo (information relevant at the core Expression-type
        level) which should be a list (or tuple) of strings, and a list (or tuple) of subExpressions.
        "styles" is a dictionary used to indicate how the Expression should be formatted
        when there are different possibilities (e.g. division with '/' or as a fraction).  The meaning
        of the expression is independent of its styles signature.
        The "requirements" are expressions that must be proven to be true in order for the Expression
        to make sense.
        '''
        if styles is None: styles = dict()
        for coreInfoElem in coreInfo:
            if not isinstance(coreInfoElem, str):
                raise TypeError(
                    'Expecting coreInfo elements to be of string type')
        for subExpression in subExpressions:
            if not isinstance(subExpression, Expression):
                raise TypeError(
                    'Expecting subExpression elements to be of Expression type'
                )

        # note: these contained expressions are subject to style changes on an Expression instance basis
        self._subExpressions = tuple(subExpressions)

        # check for illegal characters in core-info or styles
        if any(',' in info for info in coreInfo):
            raise ValueError("coreInfo is not allowed to contain a comma.")
        if styles is not None:
            for style in styles.values():
                if not {',', ':', ';'}.isdisjoint(style):
                    raise ValueError(
                        "Styles are not allowed to contain a ',', ':', or ';'.  Just use spaces."
                    )

        # Set and initialize the "meaning data".
        # The meaning data is shared among Expressions with the same
        # structure disregarding style or chosen lambda parameterization.
        if hasattr(self, '_genericExpr') and self._genericExpr is not self:
            # The _genericExpr attribute was already set
            # -- must be a Lambda Expression.
            self._meaningData = self._genericExpr._meaningData
        else:
            object_rep_fn = lambda expr: hex(expr._meaning_id)
            self._meaningData = meaningData(
                self._generate_unique_rep(object_rep_fn, coreInfo))
            if not hasattr(self._meaningData, '_coreInfo'):
                # initialize the data of self._meaningData
                self._meaningData._coreInfo = tuple(coreInfo)
                # combine requirements from all sub-expressions
                requirements = sum([
                    subExpression.getRequirements()
                    for subExpression in subExpressions
                ], tuple()) + requirements
                # Expression requirements are essentially assumptions that need
                # to be proven for the expression to be valid.  Calling
                # "checkAssumptions" will remove repeats and generate proof by
                # assumption for each (which may not be necessary, but does not
                # hurt).
                self._meaningData._requirements = \
                    defaults.checkedAssumptions(requirements)

        # The style data is shared among Expressions with the same structure and style -- this will contain the 'png' generated on demand.
        self._styleData = styleData(
            self._generate_unique_rep(lambda expr: hex(expr._style_id),
                                      coreInfo, styles))
        # initialize the style options
        self._styleData.styles = dict(
            styles
        )  # formatting style options that don't affect the meaning of the expression

        # reference this unchanging data of the unique 'meaning' data
        self._meaning_id = self._meaningData._unique_id
        self._coreInfo = self._meaningData._coreInfo
        self._requirements = self._meaningData._requirements

        self._style_id = self._styleData._unique_id
        """
        self._styles = dict(styles) # formatting style options that don't affect the meaning of the expression
        # meaning representations and unique ids are independent of style
        self._meaning_rep = 
        self._meaning_id = makeUniqueId(self._meaning_rep)
        # style representations and style ids are dependent of style
        self._style_rep = self._generate_unique_rep(lambda expr : hex(expr._style_id), includeStyle=True)
        self._style_id = makeUniqueId(self._style_rep)
        """
        for subExpression in subExpressions:  # update Expression.parent_expr_map
            self._styleData.addChild(self, subExpression)
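
The difference from the __init__ in Example #1 is that the per-meaning record is now shared: every Expression built from the same structural representation reuses one cached meaningData object, so core info and requirements are stored once no matter how many styled copies exist. A toy sketch of that interning pattern (plain Python; the cache and record layout are illustrative, not the actual meaningData implementation):

    _meaning_cache = {}

    def meaning_data(unique_rep):
        # return the single shared record for this structural representation
        return _meaning_cache.setdefault(unique_rep, {'_unique_rep': unique_rep})

    a = meaning_data('Operation;Div|0xa1,0xb2')
    b = meaning_data('Operation;Div|0xa1,0xb2')
    assert a is b  # same structure -> same shared meaning data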
Example #6
    def prove(self, assumptions=USE_DEFAULTS, automation=USE_DEFAULTS):
        '''
        Attempt to prove this expression automatically under the
        given assumptions (with USE_DEFAULTS, defaults.assumptions are used).  First
        it tries to find an existing KnownTruth, then it tries a simple
        proof by assumption (if self is contained in the assumptions),
        then it attempts to call the 'conclude' method.  If successful,
        the KnownTruth is returned, otherwise an exception is raised.
        Cyclic attempts to `conclude` the same expression under the
        same set of assumptions will be blocked, so `conclude` methods are
        free to make attempts that may be cyclic.
        '''
        from proveit import KnownTruth, ProofFailure
        from proveit.logic import Not
        assumptions = defaults.checkedAssumptions(assumptions)
        assumptionsSet = set(assumptions)
        if automation is USE_DEFAULTS:
            automation = defaults.automation

        # Note: exclude WILDCARD_ASSUMPTIONS when looking for an existing proof.
        #   (may not matter, but just in case).
        foundTruth = KnownTruth.findKnownTruth(self, (assumptionsSet - {'*'}))
        if foundTruth is not None:
            foundTruth.withMatchingStyles(
                self, assumptions)  # give it the appropriate style
            return foundTruth  # found an existing KnownTruth that does the job!

        if self in assumptionsSet or '*' in assumptionsSet:
            # prove by assumption if self is in the list of assumptions or
            # WILDCARD_ASSUMPTIONS is in the list of assumptions.
            from proveit._core_.proof import Assumption
            return Assumption.makeAssumption(self, assumptions).provenTruth

        if not automation:
            raise ProofFailure(self, assumptions, "No pre-existing proof")

        # Use Expression.in_progress_to_conclude set to prevent an infinite recursion
        in_progress_key = (self,
                           tuple(
                               sorted(assumptions,
                                      key=lambda expr: hash(expr))))
        if in_progress_key in Expression.in_progress_to_conclude:
            raise ProofFailure(self, assumptions,
                               "Infinite 'conclude' recursion blocked.")
        Expression.in_progress_to_conclude.add(in_progress_key)

        try:
            concludedTruth = None
            if isinstance(self, Not):
                # if it is a Not expression, try concludeNegation on the operand
                try:
                    concludedTruth = self.operands[0].concludeNegation(
                        assumptions=assumptions)
                except NotImplementedError:
                    pass  # that didn't work, try conclude on the Not expression itself
            if concludedTruth is None:
                try:
                    # first attempt to prove via implication
                    concludedTruth = self.concludeViaImplication(assumptions)
                except ProofFailure:
                    # try the 'conclude' method of the specific Expression class
                    concludedTruth = self.conclude(assumptions)
            if concludedTruth is None:
                raise ProofFailure(self, assumptions,
                                   "Failure to automatically 'conclude'")
            if not isinstance(concludedTruth, KnownTruth):
                raise ValueError(
                    "'conclude' method should return a KnownTruth (or raise an exception)"
                )
            if concludedTruth.expr != self:
                raise ValueError(
                    "'conclude' method should return a KnownTruth for this Expression object: "
                    + str(concludedTruth.expr) + " does not match " +
                    str(self))
            if not concludedTruth.assumptionsSet.issubset(assumptionsSet):
                raise ValueError(
                    "While proving " + str(self) +
                    ", 'conclude' method returned a KnownTruth with extra assumptions: "
                    + str(set(concludedTruth.assumptions) - assumptionsSet))
            if concludedTruth.expr._style_id == self._style_id:
                return concludedTruth  # concludedTruth with the same style as self.
            return concludedTruth.withMatchingStyles(
                self, assumptions)  # give it the appropriate style
        except NotImplementedError:
            raise ProofFailure(
                self, assumptions,
                "'conclude' method not implemented for proof automation")
        finally:
            Expression.in_progress_to_conclude.remove(in_progress_key)
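
The in_progress_to_conclude bookkeeping is a general re-entrancy guard: record the (goal, assumptions) pair before dispatching to 'conclude' strategies and always release it in a finally block so failures do not leave the guard set. A stripped-down sketch of the same pattern (plain Python, independent of the Prove-It classes):

    in_progress = set()

    def conclude(goal, assumptions, strategies):
        key = (goal, tuple(sorted(assumptions)))
        if key in in_progress:
            raise RuntimeError("cyclic 'conclude' attempt blocked")
        in_progress.add(key)
        try:
            for strategy in strategies:
                result = strategy(goal, assumptions)
                if result is not None:
                    return result
            raise RuntimeError("no strategy succeeded")
        finally:
            in_progress.discard(key)  # always release the guard, even on failure

    # proof-by-assumption as the only strategy:
    assert conclude('P', {'P'}, [lambda goal, assumed: goal if goal in assumed else None]) == 'P'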
Example #7
    def __init__(self,
                 coreInfo,
                 subExpressions=tuple(),
                 styles=dict(),
                 requirements=tuple()):
        '''
        Initialize an expression with the given coreInfo (information relevant at the core Expression-type
        level) which should be a list (or tuple) of strings, and a list (or tuple) of subExpressions.
        "styles" is a dictionary used to indicate how the Expression should be formatted
        when there are different possibilities (e.g. division with '/' or as a fraction).  The meaning
        of the expression is independent of its styles signature.
        The "requirements" are expressions that must be proven to be true in order for the Expression
        to make sense.
        '''
        for coreInfoElem in coreInfo:
            if not isinstance(coreInfoElem, str):
                raise TypeError(
                    'Expecting coreInfo elements to be of string type')
        for subExpression in subExpressions:
            if not isinstance(subExpression, Expression):
                raise TypeError(
                    'Expecting subExpression elements to be of Expression type'
                )

        # note: these contained expressions are subject to style changes on an Expression instance basis
        self._subExpressions = tuple(subExpressions)

        # The meaning data is shared among Expressions with the same structure disregarding style
        self._meaningData = meaningData(
            self._generate_unique_rep(lambda expr: hex(expr._meaning_id),
                                      coreInfo))
        if not hasattr(self._meaningData, '_coreInfo'):
            # initialize the data of self._meaningData
            self._meaningData._coreInfo = tuple(coreInfo)
            # combine requirements from all sub-expressions
            requirements = sum([
                subExpression.getRequirements()
                for subExpression in subExpressions
            ], tuple()) + requirements
            # Expression requirements are essentially assumptions that need to be proven for the expression to
            # be valid.  Calling "checkAssumptions" will remove repeats and generate proof by assumption for each
            # (which may not be necessary, but does not hurt).
            self._meaningData._requirements = defaults.checkedAssumptions(
                requirements)

        # The style data is shared among Expressions with the same structure and style -- this will contain the 'png' generated on demand.
        self._styleData = styleData(
            self._generate_unique_rep(lambda expr: hex(expr._style_id),
                                      coreInfo, styles))
        if not hasattr(self._styleData, 'styles'):
            # initialize the data of self._styleData
            self._styleData.styles = dict(
                styles
            )  # formatting style options that don't affect the meaning of the expression

        # reference this unchanging data of the unique 'meaning' data
        self._meaning_id = self._meaningData._unique_id
        self._coreInfo = self._meaningData._coreInfo
        self._requirements = self._meaningData._requirements

        self._style_id = self._styleData._unique_id
        """
        self._styles = dict(styles) # formatting style options that don't affect the meaning of the expression
        # meaning representations and unique ids are independent of style
        self._meaning_rep = 
        self._meaning_id = makeUniqueId(self._meaning_rep)
        # style representations and style ids are dependent of style
        self._style_rep = self._generate_unique_rep(lambda expr : hex(expr._style_id), includeStyle=True)
        self._style_id = makeUniqueId(self._style_rep)
        """
        for subExpression in subExpressions:  # update Expression.parent_expr_map
            self._styleData.addChild(self, subExpression)
Example #8
    def substituted(self,
                    exprMap,
                    relabelMap=None,
                    reservedVars=None,
                    assumptions=USE_DEFAULTS,
                    requirements=None):
        '''
        Returns this expression with the substitutions made 
        according to exprMap and/or relabeled according to relabelMap.
        Attempt to automatically expand the iteration if any Indexed 
        sub-expressions substitute their variable for a composite
        (list or tensor).  Indexed should index variables that represent
        composites, but substituting the composite is a signal that
        an outer iteration should be expanded.  An exception is
        raised if this fails.
        '''
        from .composite import _generateCoordOrderAssumptions
        from proveit import ProofFailure, ExprArray
        from proveit.logic import Equals, InSet
        from proveit.number import Less, LessEq, dist_add, \
            zero, one, dist_subtract, Naturals, Integers
        from .composite import _simplifiedCoord
        from proveit._core_.expression.expr import _NoExpandedIteration
        from proveit._core_.expression.label.var import safeDummyVars

        self._checkRelabelMap(relabelMap)
        if relabelMap is None: relabelMap = dict()

        assumptions = defaults.checkedAssumptions(assumptions)
        new_requirements = []
        iter_params = self.lambda_map.parameters
        iter_body = self.lambda_map.body
        ndims = self.ndims
        subbed_start = self.start_indices.substituted(exprMap, relabelMap,
                                                      reservedVars,
                                                      assumptions,
                                                      new_requirements)
        subbed_end = self.end_indices.substituted(exprMap, relabelMap,
                                                  reservedVars, assumptions,
                                                  new_requirements)

        #print("iteration substituted", self, subbed_start, subbed_end)

        # Need to handle the change in scope within the lambda
        # expression.  We won't use 'new_params'; they aren't relevant
        # after an expansion.
        new_params, inner_expr_map, inner_assumptions, inner_reservations \
            = self.lambda_map._innerScopeSub(exprMap, relabelMap,
                  reservedVars, assumptions, new_requirements)

        # Get sorted substitution parameter start and end
        # values demarcating how the entry array must be split up for
        # each axis.
        all_entry_starts = [None] * ndims
        all_entry_ends = [None] * ndims
        do_expansion = False
        for axis in range(ndims):
            try:
                empty_eq = Equals(dist_add(subbed_end[axis], one),
                                  subbed_start[axis])
                try:
                    # Check if this is an empty iteration which
                    # happens when end+1=start.
                    empty_eq.prove(assumptions, automation=False)
                    all_entry_starts[axis] = all_entry_ends[axis] = []
                    do_expansion = True
                    continue
                except ProofFailure:
                    pass
                param_vals = \
                    iter_body._iterSubParamVals(axis, iter_params[axis],
                                                subbed_start[axis],
                                                subbed_end[axis],
                                                inner_expr_map, relabelMap,
                                                inner_reservations,
                                                inner_assumptions,
                                                new_requirements)
                assert param_vals[0] == subbed_start[axis]
                if param_vals[-1] != subbed_end[axis]:
                    # The last of the param_vals should either be
                    # subbed_end[axis] or known to be
                    # subbed_end[axis]+1.  Let's double-check.
                    eq = Equals(dist_add(subbed_end[axis], one),
                                param_vals[-1])
                    eq.prove(assumptions, automation=False)
                # Populate the entry starts and ends using the
                # param_vals which indicate that start of each contained
                # entry plus the end of this iteration.
                all_entry_starts[axis] = []
                all_entry_ends[axis] = []
                for left, right in zip(param_vals[:-1], param_vals[1:]):
                    all_entry_starts[axis].append(left)
                    try:
                        eq = Equals(dist_add(left, one), right)
                        eq.prove(assumptions, automation=False)
                        new_requirements.append(
                            eq.prove(assumptions, automation=False))
                        # Simple single-entry case: the start and end
                        # are the same.
                        entry_end = left
                    except:
                        # Not the simple case; perform the positive
                        # integrality check.
                        requirement = InSet(dist_subtract(right, left),
                                            Naturals)
                        # Knowing the simplification may help prove the
                        # requirement.
                        _simplifiedCoord(requirement, assumptions, [])
                        try:
                            new_requirements.append(
                                requirement.prove(assumptions))
                        except ProofFailure as e:
                            raise IterationError("Failed to prove requirement "
                                                 "%s:\n%s" % (requirement, e))
                        if right == subbed_end[axis]:
                            # This last entry is the inclusive end
                            # rather than past the end, so it is an
                            # exception.
                            entry_end = right
                        else:
                            # Subtract one from the start of the next
                            # entry to get the end of this entry.
                            entry_end = dist_subtract(right, one)
                            entry_end = _simplifiedCoord(
                                entry_end, assumptions, new_requirements)
                    all_entry_ends[axis].append(entry_end)
                # See if we should add the end value as an extra
                # singular entry.  If param_vals[-1] is at the inclusive
                # end, then we have a singular final entry.
                if param_vals[-1] == subbed_end[axis]:
                    end_val = subbed_end[axis]
                    all_entry_starts[axis].append(end_val)
                    all_entry_ends[axis].append(end_val)
                else:
                    # Otherwise, the last param_val will be one after
                    # the inclusive end which we will want to use below
                    # when building the last iteration entry.
                    all_entry_starts[axis].append(param_vals[-1])
                do_expansion = True
            except EmptyIterException:
                # Indexing over a negative or empty range.  The only way this
                # should be allowed is if subbed_end+1=subbed_start.
                Equals(dist_add(subbed_end[axis], one),
                       subbed_start[axis]).prove(assumptions)
                all_entry_starts[axis] = all_entry_ends[axis] = []
                do_expansion = True
            except _NoExpandedIteration:
                pass

        if do_expansion:
            # There are Indexed sub-Expressions whose variable is
            # being replaced with a Composite, so let us
            # expand the iteration for all of the relevant
            # iteration ranges.
            # Sort the argument value ranges.

            # We must have "substition parameter values" along each
            # axis:
            if None in all_entry_starts or None in all_entry_ends:
                raise IterationError("Must expand all axes or none of the "
                                     "axes, when substituting %s" % str(self))

            # Generate the expanded tuple/array as the substitution
            # of 'self'.
            shape = [len(all_entry_ends[axis]) for axis in range(ndims)]
            entries = ExprArray.make_empty_entries(shape)
            indices_by_axis = [range(extent) for extent in shape]
            #print('shape', shape, 'indices_by_axis', indices_by_axis, 'sub_param_vals', sub_param_vals)

            extended_inner_assumptions = list(inner_assumptions)
            for axis_starts in all_entry_starts:
                # Generate assumptions that order the successive entry
                # start parameter values; differences between them must
                # be natural numbers.  (This is a requirement for
                # iteration instances and is a simple fact of
                # succession for single entries.)
                extended_inner_assumptions.extend(
                    _generateCoordOrderAssumptions(axis_starts))

            # Maintain lists of parameter values that come before each given entry.
            #prev_param_vals = [[] for axis in range(ndims)]

            # Iterate over each of the new entries, obtaining indices
            # into sub_param_vals for the start parameters of the entry.
            for entry_indices in itertools.product(*indices_by_axis):
                entry_starts = [axis_starts[i] for axis_starts, i in \
                                zip(all_entry_starts, entry_indices)]
                entry_ends = [axis_ends[i] for axis_ends, i in \
                                zip(all_entry_ends, entry_indices)]

                is_singular_entry = True
                for entry_start, entry_end in zip(entry_starts, entry_ends):
                    # Note that empty ranges will be skipped because
                    # equivalent parameter values should be skipped in
                    # the param_vals above.
                    if entry_start != entry_end:
                        # Not a singular entry along this axis, so
                        # it is not a singular entry.  We must do an
                        # iteration for this entry.
                        is_singular_entry = False

                if is_singular_entry:
                    # Single element entry.

                    # Generate the entry by making appropriate
                    # parameter substitutions for the iteration body.
                    entry_inner_expr_map = dict(inner_expr_map)
                    entry_inner_expr_map.update({
                        param: arg
                        for param, arg in zip(iter_params, entry_starts)
                    })
                    for param in iter_params:
                        relabelMap.pop(param, None)
                    entry = iter_body.substituted(entry_inner_expr_map,
                                                  relabelMap,
                                                  inner_reservations,
                                                  extended_inner_assumptions,
                                                  new_requirements)
                else:
                    # Iteration entry.
                    # Shift the iteration parameter so that the
                    # iteration will have the same start-indices
                    # for this sub-range (like shifting a viewing
                    # window, moving the origin to the start of the
                    # sub-range).

                    # Generate "safe" new parameters (the Variables are
                    # not used for anything that might conflict).
                    # Avoid using free variables from these expressions:
                    unsafe_var_exprs = [self]
                    unsafe_var_exprs.extend(exprMap.values())
                    unsafe_var_exprs.extend(relabelMap.values())
                    unsafe_var_exprs.extend(entry_starts)
                    unsafe_var_exprs.extend(entry_ends)
                    new_params = safeDummyVars(ndims, *unsafe_var_exprs)

                    # Make assumptions that places the parameter(s) in the
                    # appropriate range and at an integral coordinate position.
                    # Note, it is possible that this actually represents an
                    # empty range and that these assumptions are contradictory;
                    # but this still suits our purposes regardless.
                    # Also, we will choose to shift the parameter so it
                    # starts at the start index of the iteration.
                    range_expr_map = dict(inner_expr_map)
                    range_assumptions = []
                    shifted_entry_ends = []
                    for axis, (param, new_param, entry_start, entry_end) \
                            in enumerate(zip(iter_params, new_params,
                                             entry_starts, entry_ends)):
                        start_idx = self.start_indices[axis]
                        shift = dist_subtract(entry_start, start_idx)
                        shift = _simplifiedCoord(shift, assumptions,
                                                 new_requirements)
                        if shift != zero:
                            shifted_param = dist_add(new_param, shift)
                        else:
                            shifted_param = new_param
                        range_expr_map[param] = shifted_param
                        shifted_end = dist_subtract(entry_end, shift)
                        shifted_end = _simplifiedCoord(shifted_end,
                                                       assumptions,
                                                       new_requirements)
                        shifted_entry_ends.append(shifted_end)
                        assumption = InSet(new_param, Integers)
                        range_assumptions.append(assumption)
                        assumption = LessEq(entry_start, shifted_param)
                        range_assumptions.append(assumption)
                        # Assume differences with each of the previous
                        # range starts are natural numbers as should be
                        # the case given requirements that have been
                        # met.
                        next_index = entry_indices[axis] + 1
                        prev_starts = all_entry_starts[axis][:next_index]
                        for prev_start in prev_starts:
                            assumption = InSet(
                                dist_subtract(shifted_param, prev_start),
                                Naturals)
                            range_assumptions.append(assumption)
                        next_start = all_entry_starts[axis][next_index]
                        assumption = Less(shifted_param, next_start)
                        range_assumptions.append(assumption)

                    # Perform the substitution.
                    # The fact that our "new parameters" are "safe"
                    # alleviates the need to reserve anything extra.
                    range_lambda_body = iter_body.substituted(
                        range_expr_map, relabelMap, reservedVars,
                        extended_inner_assumptions + range_assumptions,
                        new_requirements)
                    # Any requirements that involve the new parameters
                    # are a direct consequence of the iteration range
                    # and are not external requirements:
                    new_requirements = \
                        [requirement for requirement in new_requirements
                         if requirement.freeVars().isdisjoint(new_params)]
                    entry = Iter(new_params, range_lambda_body,
                                 self.start_indices, shifted_entry_ends)
                # Set this entry in the entries array.
                ExprArray.set_entry(entries, entry_indices, entry)
                '''      
                    # Iteration entry.
                    # Shift the iteration parameter so that the 
                    # iteration will have the same start-indices
                    # for this sub-range (like shifting a viewing 
                    # window, moving the origin to the start of the 
                    # sub-range).

                    # Generate "safe" new parameters (the Variables are
                    # not used for anything that might conflict).
                    # Avoid using free variables from these expressions:
                    unsafe_var_exprs = [self]
                    unsafe_var_exprs.extend(exprMap.values())
                    unsafe_var_exprs.extend(relabelMap.values())
                    unsafe_var_exprs.extend(entry_start_vals)
                    unsafe_var_exprs.extend(entry_end_vals)
                    new_params = safeDummyVars(len(iter_params), 
                                               *unsafe_var_exprs)
                    
                    # Make the appropriate substitution mapping
                    # and add appropriate assumptions for the iteration
                    # parameter(s).
                    range_expr_map = dict(inner_expr_map)
                    range_assumptions = []
                    for start_idx, param, new_param, range_start, range_end \
                            in zip(subbed_start, iter_params, new_params, 
                                   entry_start_vals, entry_end_vals):
                        shifted_param = Add(new_param, subtract(range_start, start_idx))
                        shifted_param = _simplifiedCoord(shifted_param, assumptions,
                                                         requirements)
                        range_expr_map[param] = shifted_param
                        # Include assumptions that the parameters are 
                        # in the proper range.
                        assumption = LessEq(start_idx, new_param)
                        range_assumptions.append(assumption)
                        assumption = InSet(subtract(new_param, start_idx), Naturals)
                        #assumption = LessEq(new_param,
                        #                    subtract(range_end, start_idx))
                        assumption = LessEq(new_param, range_end)
                        range_assumptions.append(assumption)
                    
                    # Perform the substitution.
                    # The fact that our "new parameters" are "safe" 
                    # alleviates the need to reserve anything extra.
                    range_lambda_body = iter_body.substituted(range_expr_map, 
                        relabelMap, reservedVars, 
                        inner_assumptions+range_assumptions, new_requirements)
                    # Any requirements that involve the new parameters 
                    # are a direct consequence of the iteration range 
                    # and are not external requirements:
                    new_requirements = \
                        [requirement for requirement in new_requirements 
                         if requirement.freeVars().isdisjoint(new_params)]
                    range_lambda_map = Lambda(new_params, range_lambda_body)
                    # Obtain the appropriate end indices.
                    end_indices = \
                        [_simplifiedCoord(subtract(range_end, start_idx), 
                                          assumptions, new_requirements) 
                         for start_idx, range_end in zip(subbed_start, 
                                                          entry_end_vals)]
                    entry = Iter(range_lambda_map, subbed_start, end_indices)
                # Set this entry in the entries array.
                ExprArray.set_entry(entries, entry_start_indices, entry)
                '''
            subbed_self = compositeExpression(entries)
        else:
            # No Indexed sub-Expressions whose variable is
            # replaced with a Composite, so let us not expand the
            # iteration.  Just do an ordinary substitution.
            new_requirements = []  # Fresh new requirements.
            subbed_map = self.lambda_map.substituted(exprMap, relabelMap,
                                                     reservedVars, assumptions,
                                                     new_requirements)
            subbed_self = Iter(subbed_map.parameters, subbed_map.body,
                               subbed_start, subbed_end)

        for requirement in new_requirements:
            # Make sure requirements don't use reserved variable in a
            # nested scope.
            requirement._restrictionChecked(reservedVars)
        if requirements is not None:
            requirements += new_requirements  # append new requirements

        return subbed_self
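
# Usage sketch (not part of the source above): how a caller typically collects
# the requirements generated during substitution.  The names `some_iter`, `x`,
# and `composite_repl` are hypothetical placeholders; only the substituted(...)
# signature is taken from the method shown here.
new_requirements = []
expanded = some_iter.substituted({x: composite_repl},
                                 requirements=new_requirements)
# If the Iter was expanded entry-by-entry, new_requirements now holds the
# index-simplification facts that must be proven for `expanded` to be valid.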
Example No. 9
0
    def substituted(self,
                    exprMap,
                    relabelMap=None,
                    reservedVars=None,
                    assumptions=USE_DEFAULTS,
                    requirements=None):
        '''
        Return this expression with its variables substituted 
        according to exprMap and/or relabeled according to relabelMap.
        The Lambda parameters have their own scope within the Lambda 
        body and do not get substituted.  They may be relabeled, however. 
        Substitutions within the Lambda body are restricted to 
        exclude the Lambda parameters themselves (these Variables 
        are reserved), consistent with any relabeling.
        '''
        from proveit import compositeExpression, Iter
        if (exprMap is not None) and (self in exprMap):
            # the full expression is to be substituted
            return exprMap[self]._restrictionChecked(reservedVars)
        if relabelMap is None: relabelMap = dict()
        assumptions = defaults.checkedAssumptions(assumptions)
        # Can't substitute the lambda parameter variables; they are in a new scope.
        innerExprMap = {
            key: value
            for (key, value) in exprMap.items()
            if key not in self.parameterVarSet
        }
        # Can't use assumptions involving lambda parameter variables
        innerAssumptions = [
            assumption for assumption in assumptions
            if self.parameterVarSet.isdisjoint(assumption.freeVars())
        ]
        # Handle relabeling and variable reservations consistent with relabeling.
        innerReservations = dict() if reservedVars is None else dict(
            reservedVars)
        newParams = []

        for parameter, parameterVar in zip(self.parameters,
                                           self.parameterVars):
            # Note that lambda parameters introduce a new scope and don't need to,
            # themselves, be restriction checked.  But they generate new inner restrictions
            # that disallow any substitution from a variable that isn't in the new scope
            # to a variable that is in the new scope.
            # For example, we can relabel y to z in (x, y) -> f(x, y), but not f to x.
            if parameterVar in relabelMap:
                if isinstance(parameter, Iter):
                    # expanding an iteration.  For example: x_1, ..., x_n -> a, b, c, d
                    relabeledParams = parameter.substituted(
                        exprMap, relabelMap, reservedVars, assumptions,
                        requirements)
                    if len(relabeledParams) != len(relabelMap[parameterVar]):
                        raise ImproperSubstitution(
                            "Relabeling of iterated parameters incomplete: %d length expansion versus %d length substitution"
                            % (len(relabeledParams),
                               len(relabelMap[parameterVar])))
                else:
                    relabeledParams = compositeExpression(
                        relabelMap[parameterVar])
                for relabeledParam in relabeledParams:
                    newParams.append(relabeledParam)
                    innerReservations[relabeledParam] = parameterVar
            else:
                # can perform a substitution in the indices of a parameter iteration: x_1, ..., x_n
                newParams.append(
                    parameter.substituted(innerExprMap, relabelMap,
                                          reservedVars, assumptions,
                                          requirements))
                innerReservations[parameterVar] = parameterVar
        # the lambda body with the substitution:
        subbedBody = self.body.substituted(innerExprMap, relabelMap,
                                           innerReservations, innerAssumptions,
                                           requirements)
        # conditions with substitutions:
        subbedConditions = self.conditions.substituted(innerExprMap,
                                                       relabelMap,
                                                       innerReservations,
                                                       innerAssumptions,
                                                       requirements)
        try:
            newLambda = Lambda(newParams, subbedBody, subbedConditions)
        except (TypeError, ValueError) as e:
            raise ImproperSubstitution(str(e))
        return newLambda
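
# Usage sketch for the relabeling rule noted in the comments above; the
# constructors below (Variable, Operation, Lambda) are assumed to be the
# standard proveit ones, so treat this as illustrative rather than exact.
from proveit import Lambda, Operation, Variable

f = Variable('f')
x, y, z = Variable('x'), Variable('y'), Variable('z')
lam = Lambda((x, y), Operation(f, (x, y)))               # (x, y) -> f(x, y)
# Relabel the bound parameter y to z; the meaning is unchanged:
relabeled = lam.substituted(dict(), relabelMap={y: z})   # (x, z) -> f(x, z)
# Relabeling f to x would pull a free variable into the new scope and is
# rejected by the scoping restrictions (an error is raised), e.g.:
# lam.substituted(dict(), relabelMap={f: x})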