def makeLiteral(string_format, latex_format, extra_core_info, context):
    '''
    Make the DigitLiteral that matches the core information.
    '''
    from proveit import Context
    assert context == Context(__file__), \
        'Expecting a different Context for a DigitLiteral'
    n = int(extra_core_info[0])
    return Numeral(n, string_format, latex_format)
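# Hypothetical usage sketch (the digit value and formats are assumptions for
# illustration): the extra core information for a DigitLiteral carries the
# digit as a string, so rebuilding the literal for 3 would look like
#
#   makeLiteral('3', '3', ['3'], Context(__file__))  # -> Numeral(3, '3', '3')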
def fix_context(context_path):
    '''
    Migrate a legacy 'mode: ...' header out of _sub_contexts_.txt and into a
    separate _mode_.txt file, recording the sub-context names as we go.
    '''
    mode = None
    with open(os.path.join(context_path, '_sub_contexts_.txt')) as f:
        sub_context_names = []
        for k, line in enumerate(f.readlines()):
            if k == 0 and line[:6] == 'mode: ':
                mode = line[6:].strip()
            else:
                sub_context_names.append(line.strip())
    Context(context_path).setSubContextNames(sub_context_names)
    if mode is not None:
        with open(os.path.join(context_path, '_mode_.txt'), 'w') as fw:
            fw.write(mode + '\n')
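# Sketch of the legacy layout that fix_context migrates, inferred from the
# parsing above (the context names are hypothetical examples):
#
#   _sub_contexts_.txt (legacy)        after fix_context
#   ---------------------------        ------------------------------------
#   mode: interactive                  _sub_contexts_.txt: boolean, natural
#   boolean                            _mode_.txt:         interactive
#   natural
#
# Rewriting _sub_contexts_.txt itself is presumably handled by
# Context.setSubContextNames; only _mode_.txt is written here directly.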
def __init__(self, operator_or_operators, operand_or_operands, styles=None,
             requirements=tuple()):
    '''
    Create an operation with the given operator(s) and operand(s).
    The operator(s) must be Label(s) (a Variable or a Literal).
    When there is a single operator, there will be an 'operator' attribute.
    When there is a single operand, there will be an 'operand' attribute.
    In any case, there will be 'operators' and 'operands' attributes that
    bundle the one or more Expressions into a composite Expression.
    '''
    from proveit._core_.expression.composite import Composite, compositeExpression, singleOrCompositeExpression, Iter, Indexed
    from proveit._core_.expression.label.label import Label
    from proveit import Context
    if styles is None:
        styles = dict()  # avoid mutating a shared default dict across calls
    if hasattr(self.__class__, '_operator_') and operator_or_operators == self.__class__._operator_:
        operator = operator_or_operators
        context = Context(inspect.getfile(self.__class__))
        if Expression.contexts[operator] != context:
            raise OperationError("Expecting '_operator_' Context to match the Context of the Operation sub-class. Use 'context=__file__'.")
    self.operator_or_operators = singleOrCompositeExpression(operator_or_operators)
    self.operand_or_operands = singleOrCompositeExpression(operand_or_operands)
    if isinstance(self.operator_or_operators, Composite):
        # a composite of multiple operators:
        self.operators = self.operator_or_operators
        for operator in self.operators:
            if isinstance(operator, Iter):
                if not isinstance(operator.lambda_map.body, Indexed):
                    raise TypeError('operators must be Labels, Indexed variables, or an iteration (Iter) over Indexed variables.')
            elif not isinstance(operator, Label) and not isinstance(operator, Indexed):
                raise TypeError('an operator must be a Label, an Indexed variable, or an iteration (Iter) over Indexed variables.')
    else:
        # a single operator
        self.operator = self.operator_or_operators
        if not isinstance(self.operator, Label) and not isinstance(self.operator, Indexed):
            raise TypeError('an operator must be a Label, an Indexed variable, or an iteration (Iter) over Indexed variables.')
        # wrap a single operator in a composite for convenience
        self.operators = compositeExpression(self.operator)
    if isinstance(self.operand_or_operands, Composite):
        # a composite of multiple operands
        self.operands = self.operand_or_operands
    else:
        # a single operand
        self.operand = self.operand_or_operands
        # wrap a single operand in a composite for convenience
        self.operands = compositeExpression(self.operand)
    if 'operation' not in styles:
        styles['operation'] = 'normal'  # vs 'function'
    if 'wrapPositions' not in styles:
        styles['wrapPositions'] = '()'  # no wrapping by default
    if 'justification' not in styles:
        styles['justification'] = 'center'
    Expression.__init__(self, ['Operation'],
                        [self.operator_or_operators, self.operand_or_operands],
                        styles=styles, requirements=requirements)
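# A minimal sketch of the '_operator_' convention that __init__ checks above.
# The 'Add' class, its Literal, and the Literal constructor signature are
# hypothetical, for illustration only:
#
#   class Add(Operation):
#       # must be created with context=__file__ so that its Context matches
#       # the module defining the Operation sub-class (see the check above)
#       _operator_ = Literal('+', context=__file__)
#
#       def __init__(self, *operands):
#           Operation.__init__(self, Add._operator_, operands)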
def build(context_paths, all_paths, just_execute_proofs=False):
    '''
    Build all Context-related notebooks (_common_, _axioms_, _theorems_,
    and proof notebooks for the theorems) in the context_paths.
    For the paths in all_paths (which should include the context paths),
    build any contained notebooks and any of the expr.ipynb and
    dependencies.ipynb notebooks within the __pv_it directory
    (storing Prove-It "database" information).
    '''
    if not just_execute_proofs:
        # Make sure there is a _common_.py in each context directory.
        # These will be useful in figuring out dependencies between _common_
        # notebooks (see CommonExpressions in proveit._core_.context).
        for context_path in context_paths:
            Context(context_path).makeSpecialExprModule('common')
        # Execute the _context_ notebooks in each context directory as
        # needed and generate _context_.html.
        for context_path in context_paths:
            fix_context(context_path)
            context_notebook_path = os.path.join(context_path, '_context_.ipynb')
            with open(os.path.join(context_path, '_mode_.txt')) as f:
                prev_mode = f.read().strip()
            if prev_mode == 'interactive':
                # re-execute to switch it to static mode
                executeAndExportNotebook(context_notebook_path)
            else:
                # don't re-execute the notebook because it is already in
                # static mode; just export it to HTML.
                exportToHTML(context_notebook_path)
        # Next, run the _common_.ipynb (common expression) notebooks for the
        # contexts. For any that depend upon the _common_.py of other
        # contexts, run the requirements first.
        common_nb_queue = list(context_paths)
        executed_common_nb = set()
        while len(common_nb_queue) > 0:
            context_path = common_nb_queue.pop(0)
            if context_path in executed_common_nb:
                continue
            # The failed_common_import.txt file is used to communicate a failed
            # common expression import from another context. First erase this
            # file, then see if it is created after executing the common notebook.
            failed_common_import_filename = os.path.join(
                context_path, '__pv_it', 'failed_common_import.txt')
            if os.path.isfile(failed_common_import_filename):
                os.remove(failed_common_import_filename)
            try:
                revise_special_notebook(
                    os.path.join(context_path, '_common_.ipynb'))
                executeAndExportNotebook(
                    os.path.join(context_path, '_common_.ipynb'))
                executed_common_nb.add(context_path)  # finished successfully
            except execute.CellExecutionError as e:
                retry = False
                if os.path.isfile(failed_common_import_filename):
                    # A failed_common_import.txt file was created. It indicates
                    # the context from which a common expression import was
                    # attempted. If that context's _common_ notebook has not
                    # already been executed, execute it first and then try to
                    # execute this one again.
                    with open(failed_common_import_filename, 'r') as f:
                        required_context_name = f.read().strip()
                    if required_context_name not in executed_common_nb:
                        print(' Failed to execute; try a prerequisite first:',
                              required_context_name)
                        # re-insert to try again
                        common_nb_queue.insert(0, context_path)
                        # but first execute the _common_ notebook from the
                        # required context.
                        common_nb_queue.insert(0, context_map[required_context_name])
                        retry = True
                if not retry:
                    raise e
        # Next, run the _axioms_.ipynb and _theorems_.ipynb notebooks for the
        # contexts. The order does not matter assuming these expression
        # constructions do not depend upon other axioms or theorems (but
        # possibly common expressions).
        for context_path in context_paths:
            revise_special_notebook(
                os.path.join(context_path, '_axioms_.ipynb'))
            revise_special_notebook(
                os.path.join(context_path, '_theorems_.ipynb'))
            executeAndExportNotebook(
                os.path.join(context_path, '_axioms_.ipynb'))
            executeAndExportNotebook(
                os.path.join(context_path, '_theorems_.ipynb'))
    # Get the proof notebook filenames for the theorems in all of the contexts.
    # Map proof notebook names to corresponding Theorem objects:
    proof_notebook_theorems = dict()
    proof_notebooks = []
    for context_path in context_paths:
        context = Context(context_path)
        for theorem_name in context.theoremNames():
            theorem = context.getTheorem(theorem_name)
            proof_notebook_name = context.proofNotebook(
                theorem_name, theorem.provenTruth.expr)
            proof_notebook_theorems[proof_notebook_name] = theorem
            proof_notebooks.append(proof_notebook_name)
    # Next, for each of the theorems, record the "presuming" information
    # of the proof notebooks in the _proofs_ folder. Do this before executing
    # any of the proof notebooks to account for dependencies properly
    # (avoiding circular dependencies as intended).
    for proof_notebook in proof_notebooks:
        print('record presuming info:', proof_notebook)
        recordPresumingInfo(proof_notebook_theorems[proof_notebook],
                            proof_notebook)
    # Next, execute all of the proof notebooks for each context.
    # The order is not important since we know the dependencies via
    # the "presuming" information from the previous step.
    for proof_notebook in proof_notebooks:
        executeAndExportNotebook(proof_notebook)
    # Next, run any other notebooks within path/context directories
    # (e.g., with tests and demonstrations).
    for path in all_paths:
        for sub in os.listdir(path):
            full_path = os.path.join(path, sub)
            if os.path.isfile(full_path) and os.path.splitext(full_path)[1] == '.ipynb':
                if sub[0] != '_':
                    executeAndExportNotebook(full_path)
    # Lastly, run expr.ipynb and dependencies.ipynb within the hash directories
    # of the __pv_it folders for each context.
    # May require multiple passes (showing expression info may generate
    # expr.ipynb notebooks for sub-expressions).
    executed_hash_paths = set()  # hash paths whose notebooks have been executed
    while True:  # repeat until there are no more new notebooks to process
        prev_num_executed = len(executed_hash_paths)
        for path in all_paths:
            pv_it_dir = os.path.join(path, '__pv_it')
            if os.path.isdir(pv_it_dir):
                for hash_directory in os.listdir(pv_it_dir):
                    hash_path = os.path.join(pv_it_dir, hash_directory)
                    if os.path.isdir(hash_path):
                        if hash_path in executed_hash_paths:
                            continue  # already executed this case
                        expr_notebook = os.path.join(hash_path, 'expr.ipynb')
                        if os.path.isfile(expr_notebook):
                            # execute the expr.ipynb notebook
                            executeAndExportNotebook(expr_notebook)
                            executed_hash_paths.add(hash_path)  # done
                        dependencies_notebook = os.path.join(
                            hash_path, 'dependencies.ipynb')
                        if os.path.isfile(dependencies_notebook):
                            # execute the dependencies.ipynb notebook
                            executeAndExportNotebook(dependencies_notebook)
        if len(executed_hash_paths) == prev_num_executed:
            break  # no more new ones to process
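# Hypothetical top-level usage of this legacy build entry point (only the
# signature above is taken from the code; the path values are assumptions):
#
#   context_paths = [os.path.join('packages', 'proveit', 'logic')]
#   all_paths = list(context_paths)
#   build(context_paths, all_paths, just_execute_proofs=False)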
                    nargs='*', default=default_paths,
                    help=('paths to be processed; sub-contexts will be '
                          'included recursively (default: %s)'
                          % ' '.join(default_paths)))
args = parser.parse_args()
paths = args.path
# Get all of the contexts of the given top-level paths
# in the order indicated in the _sub_contexts_.txt files.
context_paths = []
context_map = dict()  # map Context names to paths
for path in paths:
    for context_path in findContextPaths(path):
        context_paths.append(context_path)
        context_map[Context(context_path).name] = context_path
all_paths = list(context_paths)
all_paths += [path for path in paths if path not in context_paths]
if args.clean:
    # clean all of the __pv_it directories that may be auto-generated
    print("Cleaning '__pv_it' directories...")
    sys.stdout.flush()
    for path in all_paths:
        # remove the __pv_it folders from all paths
        pv_it_dir = os.path.join(path, '__pv_it')
        if os.path.isdir(pv_it_dir):
            shutil.rmtree(pv_it_dir)
else:
    # remove the __pv_it/commons.pv_it in all of the context paths
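# Hypothetical command-line usage, assuming this argument handling lives in a
# 'build.py' script (the script name is an assumption; the positional paths
# and the '--clean' flag come from the parsing above):
#
#   python build.py packages/proveit    # build contexts under the given path
#   python build.py --clean             # remove auto-generated __pv_it data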
def build(execute_processor, context_paths, all_paths, no_execute=False,
          just_execute_essentials=False, just_execute_proofs=False,
          just_execute_demos=False, just_execute_expression_nbs=False):
    '''
    Build all Context-related notebooks (_common_, _axioms_, _theorems_,
    and proof notebooks for the theorems) in the context_paths.
    For the paths in all_paths (which should include the context paths),
    build any contained notebooks and any of the expr.ipynb and
    dependencies.ipynb notebooks within the __pv_it directory
    (storing Prove-It "database" information).
    '''
    if not just_execute_proofs and not just_execute_demos and not just_execute_expression_nbs:
        if not just_execute_essentials:
            # Generate html pages from index.ipynb and guide.ipynb in the
            # packages folder:
            if no_execute:
                exportToHTML('index.ipynb')
                exportToHTML('guide.ipynb')
            else:
                executeAndExportNotebook(execute_processor, 'index.ipynb')
                executeAndExportNotebook(execute_processor, 'guide.ipynb')
        # Make sure there is a _common_.py, _axioms_.py, and _theorems_.py
        # in each context directory.
        # These will be useful in figuring out dependencies between _common_
        # notebooks (see CommonExpressions in proveit._core_.context) as well
        # as in avoiding import errors.
        for context_path in context_paths:
            for spec_expr_kind in ('common', 'axioms', 'theorems'):
                Context(context_path).makeSpecialExprModule(spec_expr_kind)
        # Execute the _context_ notebooks in each context directory
        # and generate _context_.html.
        for context_path in context_paths:
            fix_context(context_path)
            context_notebook_path = os.path.join(context_path, '_context_.ipynb')
            with open(os.path.join(context_path, '_mode_.txt'), 'wt') as f:
                # when executed again, it will toggle to 'static' mode
                f.write('interactive\n')
            # execute into static mode
            executeAndExportNotebook(execute_processor, context_notebook_path,
                                     no_execute=no_execute)
        # Next, run the _common_.ipynb (common expression) notebooks for the
        # contexts. For any that depend upon the _common_.py of other
        # contexts, run the requirements first.
        """
        common_nb_queue = list(context_paths)
        executed_common_nb = set()
        while len(common_nb_queue) > 0:
            context_path = common_nb_queue.pop(0)
            if context_path in executed_common_nb:
                continue
            # The failed_common_import.txt file is used to communicate a failed
            # common expression import from another context. First erase this
            # file, then see if it is created after executing the common notebook.
            failed_common_import_filename = os.path.join(
                context_path, '__pv_it', 'failed_common_import.txt')
            if os.path.isfile(failed_common_import_filename):
                os.remove(failed_common_import_filename)
            try:
                revise_special_notebook(os.path.join(context_path, '_common_.ipynb'))
                executeAndExportNotebook(os.path.join(context_path, '_common_.ipynb'))
                executed_common_nb.add(context_path)  # finished successfully
            except execute.CellExecutionError as e:
                retry = False
                if os.path.isfile(failed_common_import_filename):
                    # A failed_common_import.txt file was created. It indicates
                    # the context from which a common expression import was
                    # attempted. If that context's _common_ notebook has not
                    # already been executed, execute it first and then try to
                    # execute this one again.
                    with open(failed_common_import_filename, 'r') as f:
                        required_context_name = f.read().strip()
                    if required_context_name not in executed_common_nb:
                        print(' Failed to execute; try a prerequisite first:',
                              required_context_name)
                        common_nb_queue.insert(0, context_path)  # re-insert to try again
                        # but first execute the _common_ notebook from the
                        # required context.
                        common_nb_queue.insert(0, context_map[required_context_name])
                        retry = True
                if not retry:
                    raise e
        """
        if no_execute:
            for context_path in context_paths:
                #revise_special_notebook(os.path.join(context_path, '_common_.ipynb'))
                exportToHTML(os.path.join(context_path, '_common_.ipynb'))
        else:
            # execute the commons notebooks first, and do this twice to work
            # out inter-dependencies
            for _ in range(2):
                for context_path in context_paths:
                    #revise_special_notebook(os.path.join(context_path, '_common_.ipynb'))
                    execute_processor.executeNotebook(os.path.join(context_path, '_common_.ipynb'))
            # one last time to eliminate "expression notebook ... updated"
            # messages; this time we'll also export to html
            for context_path in context_paths:
                #revise_special_notebook(os.path.join(context_path, '_common_.ipynb'))
                executeAndExportNotebook(execute_processor, os.path.join(context_path, '_common_.ipynb'))
        # Next, run the _axioms_.ipynb and _theorems_.ipynb notebooks for the
        # contexts. The order does not matter assuming these expression
        # constructions do not depend upon other axioms or theorems (but
        # possibly common expressions). Do this twice to get rid of extraneous
        # information about adding/removing from the database.
        if no_execute:
            for context_path in context_paths:
                exportToHTML(os.path.join(context_path, '_axioms_.ipynb'))
                exportToHTML(os.path.join(context_path, '_theorems_.ipynb'))
        else:
            for context_path in context_paths:
                #revise_special_notebook(os.path.join(context_path, '_axioms_.ipynb'))
                #revise_special_notebook(os.path.join(context_path, '_theorems_.ipynb'))
                execute_processor.executeNotebook(os.path.join(context_path, '_axioms_.ipynb'))
                execute_processor.executeNotebook(os.path.join(context_path, '_theorems_.ipynb'))
            # the second time we'll export to html
            for context_path in context_paths:
                #revise_special_notebook(os.path.join(context_path, '_axioms_.ipynb'))
                #revise_special_notebook(os.path.join(context_path, '_theorems_.ipynb'))
                executeAndExportNotebook(execute_processor, os.path.join(context_path, '_axioms_.ipynb'))
                executeAndExportNotebook(execute_processor, os.path.join(context_path, '_theorems_.ipynb'))
    if not just_execute_expression_nbs and not just_execute_demos:
        # Get the proof notebook filenames for the theorems in all of the
        # contexts.
        # Map proof notebook names to corresponding Theorem objects:
        proof_notebook_theorems = dict()
        theorem_proof_notebooks = []
        # Turn off automation while loading theorems, simply for recording
        # dependencies:
        #proveit.defaults.automation = False
        print("Finding theorem proof notebooks.")
        for context_path in context_paths:
            context = Context(context_path)
            for theorem_name in context.theoremNames():
                start_time = time.time()
                print("Loading", theorem_name, end='', flush=True)
                theorem = context.getTheorem(theorem_name)
                proof_notebook_name = context.thmProofNotebook(
                    theorem_name, theorem.provenTruth.expr)
                proof_notebook_theorems[proof_notebook_name] = theorem
                theorem_proof_notebooks.append(proof_notebook_name)
                print("; finished in %0.2f seconds" % (time.time() - start_time))
        # Turn automation back on:
        #proveit.defaults.automation = True
        '''
        # Some proveit modules may not have loaded properly while
        # automation was off, so we need to reset and reload it.
        proveit.reset()
        for k, v in list(sys.modules.items()):
            if k.startswith('proveit'):
                if k == 'proveit' or k.startswith('proveit._core_'):
                    # Don't reload proveit or proveit._core_.
                    continue
                print('reload', v)
                importlib.reload(v)
        '''
        if no_execute:
            if not just_execute_essentials:
                for proof_notebook in theorem_proof_notebooks:
                    #if not os.path.isfile(proof_notebook[-5:] + '.html'): # temporary
                    exportToHTML(proof_notebook)
        else:
            # Next, for each of the theorems, record the "presuming" information
            # of the proof notebooks in the _proofs_ folder. Do this before
            # executing any of the proof notebooks to account for dependencies
            # properly (avoiding circular dependencies as intended).
            print("Recording theorem dependencies.")
            for proof_notebook in theorem_proof_notebooks:
                recordPresumingInfo(proof_notebook_theorems[proof_notebook],
                                    proof_notebook)
            if not just_execute_essentials:
                # Next, execute all of the proof notebooks twice
                # to ensure there are no circular logic violations.
                for proof_notebook in theorem_proof_notebooks:
                    execute_processor.executeNotebook(proof_notebook)
                for proof_notebook in theorem_proof_notebooks:
                    executeAndExportNotebook(execute_processor, proof_notebook)
    if not just_execute_essentials and not just_execute_expression_nbs and not just_execute_proofs:
        # Next, run any other notebooks within path/context directories
        # (e.g., with tests and demonstrations).
        for path in all_paths:
            for sub in os.listdir(path):
                full_path = os.path.join(path, sub)
                if os.path.isfile(full_path) and os.path.splitext(full_path)[1] == '.ipynb':
                    if sub == '_demonstrations_.ipynb' or sub[0] != '_':
                        executeAndExportNotebook(execute_processor, full_path,
                                                 no_execute=no_execute)
    if not just_execute_essentials and not just_execute_proofs and not just_execute_demos:
        # Lastly, run expr.ipynb, proof.ipynb, and dependencies.ipynb within
        # the hash directories of the __pv_it folders for each context.
        # May require multiple passes (showing expression info may generate
        # expr.ipynb notebooks for sub-expressions).
        executed_hash_paths = set()  # hash paths whose notebooks have been executed
        while True:  # repeat until there are no more new notebooks to process
            prev_num_executed = len(executed_hash_paths)
            for path in all_paths:
                pv_it_dir = os.path.join(path, '__pv_it')
                if os.path.isdir(pv_it_dir):
                    for hash_directory in os.listdir(pv_it_dir):
                        hash_path = os.path.join(pv_it_dir, hash_directory)
                        if os.path.isdir(hash_path):
                            if hash_path in executed_hash_paths:
                                continue  # already executed this case
                            expr_html = os.path.join(hash_path, 'expr.html')
                            expr_notebook = os.path.join(hash_path, 'expr.ipynb')
                            if os.path.isfile(expr_notebook):
                                if no_execute:
                                    exportToHTML(expr_notebook)
                                else:
                                    # if expr.html doesn't exist or is older
                                    # than expr.ipynb, (re)generate it.
                                    if not os.path.isfile(expr_html) or os.path.getmtime(expr_html) < os.path.getmtime(expr_notebook):
                                        # execute the expr.ipynb notebook
                                        executeAndExportNotebook(execute_processor, expr_notebook)
                                        executed_hash_paths.add(hash_path)  # done
                            proof_html = os.path.join(hash_path, 'proof.html')
                            proof_notebook = os.path.join(hash_path, 'proof.ipynb')
                            if os.path.isfile(proof_notebook):
                                if no_execute:
                                    exportToHTML(proof_notebook)
                                else:
                                    # if proof.html doesn't exist or is older
                                    # than proof.ipynb, (re)generate it.
                                    if not os.path.isfile(proof_html) or os.path.getmtime(proof_html) < os.path.getmtime(proof_notebook):
                                        # execute the proof.ipynb notebook
                                        executeAndExportNotebook(execute_processor, proof_notebook)
                                        executed_hash_paths.add(hash_path)  # done
                            # always execute the dependencies notebook, for
                            # now, to be safe.
                            dependencies_notebook = os.path.join(hash_path, 'dependencies.ipynb')
                            if os.path.isfile(dependencies_notebook):
                                # execute the dependencies.ipynb notebook
                                executeAndExportNotebook(execute_processor, dependencies_notebook,
                                                         no_execute=no_execute)
            if len(executed_hash_paths) == prev_num_executed:
                break  # no more new ones to process
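# Hypothetical driver for the build above. Only the build(...) signature and
# the executeNotebook method come from the code; the ExecuteProcessor class
# name and its construction are assumptions for illustration:
#
#   processor = ExecuteProcessor()
#   build(processor, context_paths, all_paths, no_execute=False)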