def test_replacer(self):
  value = object()
  t1 = textbuilder.Text("To be or not\n to be?\n", value)
  patches = make_regexp_patches(t1.get_text(), re.compile(r'be|to', re.I),
                                lambda m: (m.group() + m.group()).upper())
  t2 = textbuilder.Replacer(t1, patches)
  self.assertEqual(t2.get_text(), "TOTO BEBE or not\n TOTO BEBE?\n")
  self.assertEqual(t2.map_back_patch(make_patch(t2.get_text(), 0, 4, "xxx")),
                   (t1.get_text(), value, Patch(0, 2, "To", "xxx")))
  self.assertEqual(t2.map_back_patch(make_patch(t2.get_text(), 5, 9, "xxx")),
                   (t1.get_text(), value, Patch(3, 5, "be", "xxx")))
  self.assertEqual(t2.map_back_patch(make_patch(t2.get_text(), 18, 23, "xxx")),
                   (t1.get_text(), value, Patch(14, 17, " to", "xxx")))
  # Match the entire second line.
  self.assertEqual(t2.map_back_patch(make_patch(t2.get_text(), 17, 29, "xxx")),
                   (t1.get_text(), value, Patch(13, 21, " to be?", "xxx")))
def test_combiner(self):
  valueA, valueB = object(), object()
  t1 = textbuilder.Text("To be or not\n to be?\n", valueA)
  patches = make_regexp_patches(t1.get_text(), re.compile(r'be|to', re.I),
                                lambda m: (m.group() + m.group()).upper())
  t2 = textbuilder.Replacer(t1, patches)
  t3 = textbuilder.Text("That is the question", valueB)
  t4 = textbuilder.Combiner(["[", t2, t3, "]"])
  self.assertEqual(t4.get_text(), "[TOTO BEBE or not\n TOTO BEBE?\nThat is the question]")
  self.assertEqual(t4.map_back_patch(make_patch(t4.get_text(), 1, 5, "xxx")),
                   (t1.get_text(), valueA, Patch(0, 2, "To", "xxx")))
  self.assertEqual(t4.map_back_patch(make_patch(t4.get_text(), 18, 30, "xxx")),
                   (t1.get_text(), valueA, Patch(13, 21, " to be?", "xxx")))
  self.assertEqual(t4.map_back_patch(make_patch(t4.get_text(), 0, 1, "xxx")),
                   None)
  self.assertEqual(t4.map_back_patch(make_patch(t4.get_text(), 31, 38, "xxx")),
                   (t3.get_text(), valueB, Patch(0, 7, "That is", "xxx")))
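# A note on the expectations above: map_back_patch() translates a patch expressed in the built
# output's offsets back into a (source_text, assoc_value, patch) tuple with offsets in the
# original Text. In the Combiner test, the patch covering only the literal "[" maps back to None,
# since that character has no source Text behind it.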
def make_formula_body(formula, default_value, assoc_value=None):
  """
  Given a formula, returns a textbuilder.Builder object suitable to be the body of a function,
  with the formula transformed to replace `$foo` with `rec.foo`, and to insert `return` if
  appropriate. Assoc_value is associated with textbuilder.Text() to be returned by map_back_patch.
  """
  if isinstance(formula, six.binary_type):
    formula = formula.decode('utf8')

  if not formula.strip():
    return textbuilder.Text('return ' + repr(default_value), assoc_value)

  formula_builder_text = textbuilder.Text(formula, assoc_value)

  # Start with a temporary builder, since we need to translate "$" before we can parse the code at
  # all (namely, we turn '$foo' into 'DOLLARfoo' first). Once we can parse the code, we'll create
  # a proper set of patches. Note that we initially translate into 'DOLLARfoo' rather than
  # 'rec.foo', so that the translated entity is a single token: this makes for more precisely
  # reported errors if there are any.
  tmp_patches = textbuilder.make_regexp_patches(formula, DOLLAR_REGEX, 'DOLLAR')
  tmp_formula = textbuilder.Replacer(formula_builder_text, tmp_patches)

  # Parse the formula into an abstract syntax tree (AST), catching syntax errors.
  try:
    atok = asttokens.ASTTokens(tmp_formula.get_text(), parse=True)
  except SyntaxError as e:
    return textbuilder.Text(_create_syntax_error_code(tmp_formula, formula, e))

  # Parse the formula and generate error code on assignment to rec.
  with use_inferences(InferRecAssignment):
    try:
      astroid.parse(tmp_formula.get_text())
    except SyntaxError as e:
      return textbuilder.Text(_create_syntax_error_code(tmp_formula, formula, e))

  # Once we have a tree, go through it and create a subset of the dollar patches that are actually
  # relevant. E.g. this is where we'll skip the "$foo" patches that appear in strings or comments.
  patches = []
  for node in ast.walk(atok.tree):
    if isinstance(node, ast.Name) and node.id.startswith('DOLLAR'):
      input_pos = tmp_formula.map_back_offset(node.first_token.startpos)
      m = DOLLAR_REGEX.match(formula, input_pos)
      # If there is no match, then we must have had a "DOLLARblah" identifier that didn't come
      # from translating a "$" prefix.
      if m:
        patches.append(textbuilder.make_patch(formula, m.start(0), m.end(0), 'rec.'))

    # Wrap arguments to the top-level "IF()" function into lambdas, for lazy evaluation. This is
    # to ensure it's not affected by an exception in the unused value, to match Excel behavior.
    if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
      lazy_args_slice = LAZY_ARG_FUNCTIONS.get(node.func.id)
      if lazy_args_slice:
        for arg in node.args[lazy_args_slice]:
          start, end = map(tmp_formula.map_back_offset, atok.get_text_range(arg))
          patches.append(textbuilder.make_patch(formula, start, start, 'lambda: ('))
          patches.append(textbuilder.make_patch(formula, end, end, ')'))

  # If the last statement is an expression whose result is unused (an ast.Expr node),
  # then insert a "return" keyword.
  last_statement = atok.tree.body[-1] if atok.tree.body else None
  if isinstance(last_statement, ast.Expr):
    input_pos = tmp_formula.map_back_offset(last_statement.first_token.startpos)
    patches.append(textbuilder.make_patch(formula, input_pos, input_pos, "return "))
  elif last_statement is None:
    # If we have an empty body (e.g. just a comment), add a 'pass' at the end.
    patches.append(textbuilder.make_patch(formula, len(formula), len(formula), '\npass'))

  # Apply the new set of patches to the original formula to get the real output.
  final_formula = textbuilder.Replacer(formula_builder_text, patches)

  # Try parsing again before returning it, just in case we have new syntax errors. These are
  # possible in cases when a single token ('DOLLARfoo') is valid but an expression ('rec.foo') is
  # not, e.g. `foo($bar=1)` or `def $foo()`.
  try:
    atok = asttokens.ASTTokens(final_formula.get_text(), parse=True)
  except SyntaxError as e:
    return textbuilder.Text(_create_syntax_error_code(final_formula, formula, e))

  # We return the text-builder object whose .get_text() is the final formula.
  return final_formula
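# Illustration only (not part of the module): a minimal standalone sketch of the two-pass idea
# used by make_formula_body() above. It first rewrites '$foo' into the single token 'DOLLARfoo'
# so that the text parses, then walks the AST and converts only genuine Name nodes, which is why
# a '$foo' inside a string literal survives untouched. The regex below is an assumption standing
# in for the module-level DOLLAR_REGEX (not shown in this excerpt), and the sketch edits plain
# strings instead of building textbuilder patches, purely to stay self-contained.
import ast
import re

_SKETCH_DOLLAR_RE = re.compile(r'\$(?=[A-Za-z_])')

def _sketch_dollar_to_rec(formula):
  # Works for single-line formulas only, since ast col_offset is an offset within its own line.
  tmp = _SKETCH_DOLLAR_RE.sub('DOLLAR', formula)
  name_starts = set(node.col_offset for node in ast.walk(ast.parse(tmp))
                    if isinstance(node, ast.Name) and node.id.startswith('DOLLAR'))
  out, prev_end = [], 0
  for k, m in enumerate(_SKETCH_DOLLAR_RE.finditer(formula)):
    # Each earlier '$' grew by len('DOLLAR') - len('$') == 5 characters in tmp.
    pos_in_tmp = m.start() + 5 * k
    out.append(formula[prev_end:m.start()])
    out.append('rec.' if pos_in_tmp in name_starts else '$')
    prev_end = m.end()
  out.append(formula[prev_end:])
  return ''.join(out)

# _sketch_dollar_to_rec("$Amount * 2")       -> "rec.Amount * 2"
# _sketch_dollar_to_rec("'$Amount' + $Tax")  -> "'$Amount' + rec.Tax"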
def prepare_acl_col_renames(docmodel, useractions, col_renames_dict):
  """
  Given a dict of column renames of the form {(table_id, col_id): new_col_id}, returns a callback
  that will apply updates to the affected ACL rules and resources.
  """
  # Collect updates for ACLResources that refer to the renamed columns.
  resource_updates = []
  for resource_rec in docmodel.aclResources.all:
    t = resource_rec.tableId
    if resource_rec.colIds and resource_rec.colIds != '*':
      new_col_ids = ','.join((col_renames_dict.get((t, c)) or c)
                             for c in resource_rec.colIds.split(','))
      if new_col_ids != resource_rec.colIds:
        resource_updates.append((resource_rec, {'colIds': new_col_ids}))

  # Collect updates for any ACLRules with UserAttributes that refer to the renamed column.
  rule_updates = []
  user_attr_tables = {}   # Maps name of user attribute to its lookup table.
  for rule_rec in docmodel.aclRules.all:
    if rule_rec.userAttributes:
      try:
        rule_info = json.loads(rule_rec.userAttributes)
        user_attr_tables[rule_info.get('name')] = rule_info.get('tableId')
        new_col_id = col_renames_dict.get((rule_info.get('tableId'), rule_info.get('lookupColId')))
        if new_col_id:
          rule_info['lookupColId'] = new_col_id
          rule_updates.append((rule_rec, {'userAttributes': json.dumps(rule_info)}))
      except Exception as e:
        log.warn("Error examining aclRule: %s" % (e,))

  # Go through again, checking if anything in ACL formulas is affected by the rename.
  for rule_rec in docmodel.aclRules.all:
    if rule_rec.aclFormula:
      formula = rule_rec.aclFormula
      patches = []

      for entity in parse_acl_grist_entities(rule_rec.aclFormula):
        if entity.type == 'recCol':
          table_id = docmodel.aclResources.table.get_record(int(rule_rec.resource)).tableId
        elif entity.type == 'userAttrCol':
          table_id = user_attr_tables.get(entity.extra)
        else:
          continue
        col_id = entity.name
        new_col_id = col_renames_dict.get((table_id, col_id))
        if not new_col_id:
          continue
        patch = textbuilder.make_patch(
            formula, entity.start_pos, entity.start_pos + len(entity.name), new_col_id)
        patches.append(patch)

      replacer = textbuilder.Replacer(textbuilder.Text(formula), patches)
      txt = replacer.get_text()
      rule_updates.append((rule_rec, {'aclFormula': txt,
                                      'aclFormulaParsed': parse_acl_formula_json(txt)}))

  def do_renames():
    useractions.doBulkUpdateFromPairs('_grist_ACLResources', resource_updates)
    useractions.doBulkUpdateFromPairs('_grist_ACLRules', rule_updates)

  return do_renames
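# Illustration only (not part of the module): a plausible call pattern for the helper above, with
# hypothetical variable names. The callback is computed from the current rules, which still
# mention the old column ids, and is applied later, presumably once the rename itself has gone
# through.
#
#   renames = {('People', 'FullName'): 'Name'}
#   apply_acl_renames = prepare_acl_col_renames(docmodel, useractions, renames)
#   ...perform the actual column rename...
#   apply_acl_renames()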
def indent(body, levels=1):
  """Indents all lines in body (which should be a textbuilder.Builder), except empty ones."""
  patches = textbuilder.make_regexp_patches(body.get_text(), indent_line_re, indent_str * levels)
  return textbuilder.Replacer(body, patches)
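# Illustration only: what indent() is expected to produce, assuming the module-level indent_str is
# two spaces and indent_line_re matches the start of every non-empty line (neither definition is
# shown in this excerpt). Empty lines are left alone, per the docstring.
#
#   body = textbuilder.Text("if rec.A:\n  return 1\n\nreturn 2\n")
#   indent(body).get_text()
#     -> "  if rec.A:\n    return 1\n\n  return 2\n"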