def script2gist(input_stream):
    # Lex and parse the script, walk the parse tree to emit XML, then run the
    # script2gist XSLT passes and write the resulting graph.
    print input_stream
    lexer = GistScriptLexer(input_stream)
    token_stream = CommonTokenStream(lexer)
    parser = GistScriptParser(token_stream)
    tree = parser.gistscript()
    lisp_tree_str = tree.toStringTree(recog=parser)
    print(lisp_tree_str)
    walker = ParseTreeWalker()
    listener = GistScriptListener()
    walker.walk(listener, tree)
    root = etree.XML('<?xml version="1.0" ?>' + listener.output)
    pretty_print(root)
    alternatives(root)
    newroot = xslt(root, 'xslt/script2gist.xsl')
    print "###### script2gist ######"
    pretty_print(newroot)
    name_temporarys(newroot)
    # name_unmapped(newroot)
    # print "###### name temps ######"
    # pretty_print(newroot)
    newroot = xslt(newroot, 'xslt/script2gist2.xsl')
    write(newroot, 'graph.xml')
    return newroot

def find_portals(maze):
    portals = {}
    for loc in maze:
        if maze[loc].isupper():
            for dir in get_open_dirs(maze, loc):
                next_loc = get_next_loc(loc, dir)
                if maze[next_loc].isupper():
                    letters = [maze[loc], maze[next_loc]]
                    pname = ''.join(sorted(letters))
                    test_loc = get_next_loc(next_loc, dir)
                    if maze.get(test_loc) == '.':
                        ploc = test_loc
                    else:
                        ploc = get_next_loc(loc, reverse_dir(dir))
                    # print('Found portal', pname, 'at loc', ploc)
                    if pname not in portals:
                        portals[pname] = set()
                    portals[pname].add(ploc)
    util.pretty_print(portals)
    jumps = {}
    for pname, locs in portals.items():
        locs = list(locs)
        if len(locs) == 2:
            jumps[locs[0]] = {'dst': locs[1], 'name': pname, 'used': []}
            jumps[locs[1]] = {'dst': locs[0], 'name': pname, 'used': []}
    return portals, jumps

def remove_key_paths(all_paths, key):
    # Remove paths to the current key
    temp = all_paths.copy()
    for key_pair, v in temp.items():
        if key_pair[0] == key or key_pair[1] == key:
            del all_paths[key_pair]
    util.pretty_print(all_paths)
    return all_paths

def collegeAnalysis(sc, percentage, tresholds, bonus=None, category="", normalizer=False):
    spark_context = sc
    player2Score = []
    if not os.path.isfile('res_' + category + '.tsv'):
        player2Score = analyze(sc, percentage, tresholds, bonus=bonus, normalizer=normalizer)
    else:
        with open('res_' + category + '.tsv') as playerFile:
            player2Score = csv.reader(playerFile, delimiter='\t')
    college2score = (player2Score
                     .map(lambda (player, score): collegeScore(player, score))
                     .reduceByKey(lambda (score1, one1), (score2, one2): (score1 + score2, one1 + one2))
                     .collect())
    util.pretty_print(util.normalize_scores_college(100, college2score))

def analyze(sc, percentage, tresholds, out=False, bonus=None, normalizer=False):
    spark_context = sc
    parallel_players = []
    if spark_context.getConf().get("provider") == 'mongo':
        parallel_players = spark_context.mongoRDD(
            'mongodb://' + spark_context.getConf().get('mongo_host') +
            ':27017/basketball_reference.basketball_reference')
    if spark_context.getConf().get("provider") == 'redis':
        limit = spark_context.getConf().get('limit')
        parallel_players = splitRedisRecord(limit, spark_context)
    f = lambda player: score4Player(player, percentage, tresholds, bonus, normalizer)
    scores = parallel_players.map(f)
    if out:
        util.pretty_print(util.normalize_scores(100, scores.collect()))
    else:
        return scores

def Lst(up, dst, output=True):
    err = False
    ret = None
    try:
        ret = up.getlist(dst)
    except Exception:
        err = True
    if err and output:
        print "List Error"
    if not err and output:
        pretty_print(ret)
    return err

def generate_equivalency(formula1, formula2, adjacency):
    try:
        equal, parse1, parse2, original = logical_equivalency.runner(formula1, formula2, adjacency)
    except (SyntaxError, TypeError) as exception:
        print(str(exception))
        exit()
    final_steps1 = []
    for steps, step_type in parse1[1]:
        final_steps1.append([util.pretty_print(steps[-1]), steps[:-1],
                             util.StepTypes.get_message(step_type)])
    final_steps2 = []
    for steps, step_type in parse2[1]:
        final_steps2.append([util.pretty_print(steps[-1]), steps[:-1],
                             util.StepTypes.get_message(step_type)])
    return (equal, str(parse1[0]), str(parse2[0]), parse1[2], parse2[2], original)

def to_str(self, args: util.PrettyPrintArgs = None) -> str:
    possible = None
    try:
        # noinspection PyUnresolvedReferences
        possible = self._possible
    except AttributeError:
        pass
    return self._str_header(args.detail_rule) + "\n" + util.pretty_print(
        self.rows, self.cols, self.max_elem, self._known, possible, args)

def generate_equivalency():
    formula1 = request.form['formula1']
    formula2 = request.form['formula2']
    form = Markup(render_template('form.html', formula1=formula1, formula2=formula2))
    try:
        equal, parse1, parse2 = logical_equivalency.runner(formula1, formula2)
    except (SyntaxError, TypeError) as exception:
        return render_template('error.html', error=str(exception), form=form)
    final_steps1 = []
    for steps, step_type in parse1[1]:
        final_steps1.append([util.pretty_print(steps[-1]), steps[:-1],
                             util.StepTypes.get_message(step_type)])
    final_steps2 = []
    for steps, step_type in parse2[1]:
        final_steps2.append([util.pretty_print(steps[-1]), steps[:-1],
                             util.StepTypes.get_message(step_type)])
    return render_template('show.html', equal=equal, form=form,
                           steps1=final_steps1, final_form1=parse1[0],
                           steps2=final_steps2, final_form2=parse2[0])

def __getattr__(self, key):
    """
    Make attribute lookups for nonexistent attributes also attempt key lookups.
    """
    if self.has_key(key):
        return self[key]
    frame = sys._getframe(1)
    if '\x00%c' % dis.opmap['STORE_ATTR'] in frame.f_code.co_code:
        self[key] = DotDict()
        return self[key]
    raise AttributeError("Key '%s' not found in:\n%s" % (key, pretty_print(self)))

def find_all_paths(maze):
    all_paths = {}
    # First create a hash of the current loc and all keys and doors
    items = {'@': get_current_loc(maze)}
    all_keys = set()
    for loc, v in maze.items():
        if re_key.match(v):
            items[v] = loc
            all_keys.add(v)
    # Next, find path lengths from each item to other items
    for item, loc in items.items():
        paths = find_key_paths(maze, loc)
        for k, v in paths.items():
            if k != item:
                all_paths[(k, item)] = {}
                all_paths[(k, item)]['steps'] = v['steps']
                all_paths[(k, item)]['needs'] = [x.lower() for x in v['doors']]
                all_paths[(item, k)] = all_paths[(k, item)]
    util.pretty_print(all_paths)
    return all_paths, all_keys

def process_converter(filename, form_xml, debug, cl_path):
    if not filename:
        return None
    if form_xml:
        form = form_xml.xpath('/CALC/FORMSET/FORM/@val')[0].upper()
    preprocessed = preprocess_calc_file(filename, debug=debug, cl_path=cl_path)
    input_stream = InputStream(preprocessed)
    tree = antrl_parse(input_stream, debug)
    xml = tree2xml(tree)
    if debug:
        print "##### parse converter: %s #####" % filename
        pretty_print(xml)
    withforms = xml.xpath('//WithForms[ID[1][@val="%s"]]' % form)
    if not withforms:
        return
    withforms = withforms[0]
    fedform = withforms.xpath("ID[2]/@val")[0].upper()
    for each in withforms.xpath(".//Assign"):
        each.set('fed', '1')
        id = each.getchildren()[0]
        id.set('val', "%s.%s" % (form, id.get('val').upper()))
        for id in each.getchildren()[1].xpath('.//ID'):
            id.set('val', "%s.%s" % (fedform, id.get('val').upper()))
            id.set('fed', '1')
    # Clean up boolean/literal assigns
    for each in withforms.xpath(".//IfStruct/Assign[Boolean|Literal]"):
        each.getparent().remove(each)
    if debug:
        print "##### withforms #####"
        pretty_print(withforms)
    form_xml.append(withforms)

def convert(input_stream):
    # Lex and parse the calc file, walk the parse tree to emit XML, then run
    # the calc2script XSLT passes to produce the script text.
    lexer = CalcLexer(input_stream)
    token_stream = CommonTokenStream(lexer)
    parser = CalcParser(token_stream)
    tree = parser.calcfile()
    lisp_tree_str = tree.toStringTree(recog=parser)
    print(lisp_tree_str)
    walker = ParseTreeWalker()
    listener = CalcListener()
    walker.walk(listener, tree)
    root = etree.XML('<?xml version="1.0" ?>' + listener.output)
    print "##ParseTreeWalker##"
    pretty_print(root)
    resolve_vars(root, use_tke='-tps' not in sys.argv)
    assign_ids(root)
    accumulations(root)
    difference(root)
    multiplications(root)
    multicopy_accumulation(root)
    # print "## RESOLVE VARS ##"
    # pretty_print(root)
    section = root.xpath('/CALC/Section/@val')[0]
    root = xslt(root, 'xslt/calc2script.xsl')
    print "## calc2script ##"
    pretty_print(root)
    clean_temps(root)
    # print "## CleanTemps ##"
    # pretty_print(root)
    root = xslt(root, 'xslt/calc2script2.xsl')
    print "## calc2script2 ##"
    pretty_print(root)
    text = xslt_text(root, 'xslt/calc2script3.xsl')
    print "## calc2script3 ##"
    print text
    return text

def print_stats_for(df, index, colname):
    col = list(filter(lambda x: x['name'] == colname, index))[0]
    name, _type = col['name'], col['type']
    stats = calculate_col_stats(df[name], name, _type)
    pretty_print(stats)
    return stats

""" Method which contains the text. :return: A string representing the future list of words. """ return "Now is the winter of our discontent" \ " Made glorious summer by this sun of York;" \ " And all the clouds that lour'd upon our house " \ "In the deep bosom of the ocean buried. " def filter_text(string): """ Method which splits the given string into words. Checks which elements have length greater than 4 and creates a list with those. :param string: A string formed of words. :return: A list formed of words from that string with length greater or equal than 4. """ words = string.split(' ') filtered = filter(lambda x: len(x) >= 4, words) return filtered if __name__ == "__main__": print("WORDS WITH LENGTH >= 4: \n") util.pretty_print(filter_text(text()))
import json

from api_client import parser, client, content
from util import pretty_print

parser.add_argument('path', type=str, help="Path of content to export")
args = parser.parse_args()

api_client = client.ApiClient(args.access_id, args.access_key, args.endpoint)
content_api = content.ContentManagementApi(api_client)

content_id = content_api.find_id_by_path(args.path)
exported_content = content_api.export(content_id)
pretty_print(exported_content)

def solve_maze_old(input):
    maze = text_to_maze(input)
    all_paths, all_keys = find_all_paths(maze)

    test = {}
    for key_pair, v in all_paths.items():
        n = ''.join(v['needs'])
        print(key_pair, n)
        if len(n) == 14:
            test[key_pair] = (n, len(n))
        # if n not in test:
        #     test[n] = 0
        # test[n] += 1
    # util.pretty_print(util.sort_by_value(test))
    util.pretty_print([(k, x) for k, x in test.items()])
    # exit(0)

    loc = get_current_loc(maze)
    maze[loc] = '.'
    total_steps = 0
    keyring = set()
    curr_key = '@'
    while True:
        keyring.add(curr_key)
        # Find open paths
        open_keys = []
        for key_pair, v in all_paths.items():
            if key_pair[0] == curr_key and key_pair[1] not in keyring:
                needs_met = True
                for n in v['needs']:
                    if n not in keyring:
                        needs_met = False
                if needs_met:
                    open_keys.append(key_pair[1])
        all_paths = remove_key_paths(all_paths, curr_key)
        # Handle open paths
        print('Open keys:', open_keys)
        if len(open_keys) == 0:
            print('Total steps:', total_steps)
            return total_steps
        elif len(open_keys) == 1:
            key = open_keys[0]
            keyring.add(key)
            print('Got key', key)
            curr_key = key
        else:
            print('Len > 1')
            exit(1)
    exit(0)

    maze = text_to_maze(input)
    loc = get_current_loc(maze)
    key_paths = find_key_paths(maze, loc)
    util.pretty_print(key_paths)
    exit(0)

    all_paths, all_keys = find_all_paths(maze)
    perms = permutations(all_keys)
    min_steps = None
    for p in list(perms):
        prev_key = '@'
        total_steps = 0
        needs_met = True
        for i, key in enumerate(p):
            # First check if all prerequisite keys have been collected
            needs = all_paths[(prev_key, key)]['needs']
            for n in needs:
                if n not in p[0:i]:
                    print(key, 'requires', n)
                    needs_met = False
            if not needs_met:
                continue
            # If so, add the path steps to the total
            steps = all_paths[(prev_key, key)]['steps']
            print(prev_key, 'to', key, '=', steps)
            total_steps += steps
            prev_key = key
        print('Permutation', p, '=', total_steps, 'needs met:', needs_met)
        if not needs_met:
            continue
        if min_steps is None or total_steps < min_steps:
            min_steps = total_steps
    print('Min steps:', min_steps)
    return min_steps

print json.dumps(settings, indent=4)
print "\n\n"
preprocessed = preprocess_calc_file(settings['calc_filename'], debug=debug, cl_path=settings['cl_path'])
print "\n\n\n"
input_stream = InputStream(preprocessed)
tree = antrl_parse(input_stream, debug)
root = tree2xml(tree)
if debug:
    print "##ParseTreeWalker##"
    pretty_print(root)
process_converter(settings['cvt_filename'], form_xml=root, debug=debug, cl_path=settings['cl_path'])
resolve_vars(root, use_tke=False)
fix_self_referencing_assigns(root)
if debug:
    print "##ParseTreeWalker##"
    pretty_print(root)
root = xslt(root, 'tps2xref.xsl')
create_or_blocks(root)

    return False


is_logged = has_logged('__init__.py')
print(is_logged)

is_logged = has_logged('__init__.py')
print(is_logged)

# json read
with open('../config/apps.json') as f:
    json_data = json.load(f)
print(type(json_data))
print(type(json_data[0]))
print(type(json_data[1]))
pretty_print(json_data)


# class
class LogLine:
    """
    Represent as object:
    03/18/2020 18:16:35.866: Java is being overtaken by python
    """

    def __init__(self, line):
        self.timestamp = None
        self.message = None
        self.parse_line(line)

    def parse_line(self, line):
        """Split timestamp and message from line and store in object"""

class LogQuery(object):

    def __init__(self, data, query):
        self.data = data
        self.query = query
        try:
            self.ast = parser.parse(query)
        except NoTokenError, e:
            print "ERROR: %s" % e.message
            print query
            return
        except SyntaxError:
            return
        if DEBUG:
            # pretty-printer
            sq = str(self.ast)
            pretty_print(sq)
            print sq
            print '-' * screen.width

    def run(self):
        start_time = time.time()
        op_data = sqlfuncs.do(self.ast, self.data[:])  # COPY!!!
        response = OrderedDict()
        for row in op_data:
            for key in row.keys():
                if not response.has_key(key):
                    response[key] = []
                response[key].append(row[key])
        Table(response, start_time).prnt()

tree = antrl_parse(input_stream, debug)
root = tree2xml(tree, debug)

for each in root.xpath('//CONCAT'):
    each.set('val', "__".join(each.xpath("ArgList/*[2]/@val")))
    each.tag = 'String'
    each.remove(each.getchildren()[0])

with open('output/preparsed.xml', 'w') as f:
    f.write(etree.tostring(root, pretty_print=True))

if debug:
    print "##ParseTreeWalker##"
    pretty_print(root)

ptformset_xml = etree.parse(settings['ptformset_xml_path'])

### Clean up procedure calls
main = root.xpath("/CALC/Section[MainDecl]")[0]
main.getparent().remove(main)
for proc in root.xpath("//ProcedureID"):
    node = root.xpath("//%s" % proc.get("val").upper())
    if node:
        print "Shared Procedure: ", proc.get("val")
        proc.getparent().tag = "CALLED_PROCEDURE"
        try:
            proc = proc.xpath("../WithNewTag|../*[.//WithNewTag]")[0]

""" celsius = [6, 11, 9.5, 15, 22, 27, 24, 30.3, 37.5, 44] fahrenheit = map(lambda c: round((9 / 5) * c + 32, 2), celsius) return fahrenheit def convert_fahrenheit(): """ Method to convert fahrenheit to celsius. Takes a list of numbers representing fahrenheit temperatures and uses lambda expression to map the celsius converting formula to the fahrenheit list. :return: a list of celsius degrees representing the converted fahrenheit numbers. """ fahrenheit = [37, 29, 40, 58.6, 20, 68, 21.5, 0, 77, 34] celsius = map(lambda f: round((5 / 9) * (f - 32), 2), fahrenheit) return celsius if __name__ == "__main__": print("\nCELSIUS TO FAHRENHEIT: \n") util.pretty_print(convert_celsius()) print("\n\nFAHRENHEIT TO CELSIUS: \n") util.pretty_print(convert_fahrenheit())
import argparse
from copy import deepcopy

import forseti.parser

import util


def runner(formula1, formula2):
    formula1 = forseti.parser.parse(formula1)
    formula2 = forseti.parser.parse(formula2)
    statement1, steps1 = util.convert(deepcopy(formula1))
    statement2, steps2 = util.convert(deepcopy(formula2))
    return statement1 == statement2, [statement1, steps1], [statement2, steps2]


if __name__ == "__main__":
    PARSER = argparse.ArgumentParser(description="Generate Truth Table for a logical formula")
    PARSER.add_argument('formula1', metavar='formula1', type=str, help='First formula to consider')
    PARSER.add_argument('formula2', metavar='formula2', type=str, help='Second formula to consider')
    PARSER_ARGS = PARSER.parse_args()
    equal, formula1, formula2 = runner(PARSER_ARGS.formula1, PARSER_ARGS.formula2)
    if equal:
        for steps, step_type in formula1[1]:
            for step in steps:
                print(util.pretty_print(step).ljust(120) + " | " + util.StepTypes.get_message(step_type))
        for steps, step_type in formula2[1]:
            for step in steps:
                print(util.pretty_print(step).ljust(120) + " | " + util.StepTypes.get_message(step_type))