def setUp(self):
    """Build the FileParser fixtures and the resource/temp file paths shared by the tests."""
    super(TestFileParser, self).setUp()

    def resource(name):
        # Absolute path of a fixture file under the test-resource folder.
        return (self.resource_folder / name).absolute()

    def temp(name):
        # Absolute path of a scratch file under the per-test temp folder.
        return (self.temp_folder / name).absolute()

    # Parsers built from the various spec fixtures.
    self.invalid_encoding_parser = FileParser(spec_file=resource("invalid_encoding_spec.json"))
    self.no_encoding_parser = FileParser(spec_file=resource("no_encoding_spec.json"))
    self.no_header_parser = FileParser(spec_file=resource("no_header_spec.json"))
    self.valid_parser = FileParser(spec_file=resource("spec.json"))

    # Pre-existing input fixtures.
    self.existing_fixed_width_file = resource("fixed_width_file.txt")
    self.existing_delimited_file = resource("delimited_file.txt")
    self.existing_delimited_file_with_header = resource("delimited_file_with_header.txt")

    # Output locations written during the tests.
    self.fixed_width_file = temp("fixed_width_file.txt")
    self.delimited_file = temp("delimited_file.txt")
    self.invalid_file = temp("filedoesnotexist.txt")
    self.output_with_header_file = temp("output_with_header.txt")
    self.output_without_header_file = temp("output_without_header.txt")
def setup_app():
    """Parse CLI options, rebuild the shared FileParser, and start the app.

    Rebinds the module-level ``fileParser`` that the route handlers read.
    Runs the Flask development server (blocking) once setup is done.
    """
    # Bug fix: without the global statement the assignment below created a
    # dead local and the routes kept serving the default './data.txt' parser.
    global fileParser
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', dest='txtFile', default='./data.txt',
                        help='text file with structured data to be parsed')
    args = parser.parse_args()
    fileParser = FileParser(args.txtFile)
    app.run(debug=True)
def execute():
    """Parse CLI arguments and run the calculation, reporting any failure.

    Returns the calculation result, or None when no arguments were parsed
    or an exception was handled.
    """
    try:
        arguments = ArgParser().parse_args()
        if not arguments:
            return None
        print("Calculating....\n")
        return FileParser(**arguments).parse_file_and_calculate()
    except KeyboardInterrupt:
        # User aborted with Ctrl-C: say goodbye instead of dumping a traceback.
        print(PrintUtils.main_screen_format(Logs.SEE_YOU_LATER_LOG))
    except Exception as err:
        # Top-level boundary: surface the error in the app's own format.
        print(PrintUtils.error_format(Logs.EXECUTION_FAILED_ERROR.format(err)))
def main():
    """CLI entry point: parse the paper overview file and dispatch on the
    selected options (filtering, downloading, bibtex export, or listing).
    """
    parser = build_parser()
    options = parser.parse_args()
    file_parser = FileParser(SECTION_REGEXES, PAPER_REGEXES)
    papers, sections = file_parser.parse_file(OVERVIEW_FILE)
    # Map each option *value* to the predicate that applies it to a paper.
    # NOTE(review): two options holding the same falsy value (e.g. both None)
    # collide as dict keys here — presumably PaperParser ignores falsy keys;
    # confirm against PaperParser.parse_papers.
    paper_checks = {
        options.section: lambda paper, option: paper.section == Section(option),
        options.papers: lambda paper, option: PaperParser.fuzzy_paper_name_match(
            paper.title, option, FUZZY_PAPER_NAME_MATCH_PERCENT),
        options.changed: lambda paper, option: paper.changed,
        options.tags: lambda paper, option: PaperParser.check_tags(paper.tags, option),
        options.to_read: lambda paper, option: not paper.is_read
    }
    # Keep only the papers that satisfy the active checks.
    papers = PaperParser(paper_checks).parse_papers(papers)
    if not papers:
        print(red("No papers match your search!"))
        return
    if options.download:
        for paper in papers:
            try:
                PaperDownloader.download(paper)
            except KeyboardInterrupt:
                # Ctrl-C only skips the current download; the loop continues.
                print(red("Stopped download!"))
    elif options.bibtex:
        with open(BIBFILE, 'w') as file:
            for paper in papers:
                print('- {}'.format(paper))
                bibtex = PaperDownloader.get_paper_bibtex(paper)
                if bibtex:
                    file.write(bibtex)
    else:
        if options.section or options.papers or options.changed or options.to_read or options.tags:
            # A filter was given: group the matching papers by section.
            papers_by_section = Section.gather_papers_by_section(papers)
            for section in papers_by_section.keys():
                print('{}\n{}'.format(
                    section, '\n'.join([
                        '- {}'.format(paper)
                        for paper in papers_by_section[section]
                    ])))
        else:
            # No filter: print a per-section paper-count overview.
            for section in sections:
                print("{} ({} papers)".format(
                    section, len(section.get_papers_in_section(papers))))
            # Summary of every tag seen across the papers.
            # NOTE(review): nesting reconstructed from a collapsed source —
            # this print may belong one level up; confirm intended placement.
            print("\nTags: [{}]".format(";".join(
                sorted(list(set([tag for paper in papers for tag in paper.tags]))))))
def main():
    """Entry point: load a configuration file and generate the C++ file tree.

    Usage: <command> <configuration-file>. Prints a usage or error message
    and returns early when the argument is missing or the file is absent.
    """
    if len(sys.argv) == 1:
        print("Usage: <command> <configuration-file>")
        return
    fp = FileParser(sys.argv[1])
    if not fp.exists():
        # Fixed spelling/grammar of the user-facing error message.
        print(
            "Configuration file does not exist! Please check the given path.")
        return
    fp.load_config()
    fp.generate_file_tree()
    print_tree_stats(fp)
    # NOTE(review): output root is hard-coded; consider making it configurable.
    gen = Generator("/home/janschon/", fp.get_tree())
    gen.generate_cpp()
from coffee_finder import CoffeeFinder
from file_parser import FileParser


def run_scenario(path, trailing_newline):
    """Parse one test file, print its city layout, and answer its coffee queries.

    ``trailing_newline`` reproduces the blank-line separator the original
    script printed between the two scenarios.
    """
    parser = FileParser(path)
    parser.get_initials()
    city = parser.get_city()
    city.print_layout()
    queries = parser.get_queries()
    coffee_finder = CoffeeFinder(city)
    for query in queries:
        optimum = coffee_finder.find_coffee(int(query))
        for coffee_number, cell in optimum.items():
            print("Found {0} coffeeshops near this cell: {1}".format(
                coffee_number, cell))
    if trailing_newline:
        print("\n")


# Previously this whole sequence was duplicated verbatim for each file.
run_scenario("tests/test_one.txt", trailing_newline=True)
run_scenario("tests/test_two.txt", trailing_newline=False)
import sys

import globals_
from file_parser import FileParser
from main_setup import main_setup


def simulate(network):
    """Register *network* with the global event manager and run the simulation."""
    globals_.event_manager.register_network(network)
    globals_.event_manager.run()


if __name__ == '__main__':
    # Expect a test-case JSON file plus an optional output base name.
    if len(sys.argv) != 2 and len(sys.argv) != 3:
        # Bug fixes: `sys` was never imported, and the Python-2-only print
        # statement is now the parenthesized form (valid on 2 and 3).
        print('Usage: %s test_file.json [output_name]' % sys.argv[0])
        sys.exit(-1)
    test_case_name = sys.argv[1]
    output_name = 'output'
    if len(sys.argv) > 2:
        output_name = sys.argv[2]
    main_setup(output_name)
    file_parser = FileParser()
    network = file_parser.create_network(test_case_name)
    simulate(network)
    globals_.stats_manager.output_graphs()
import sys
import copy
import config
from utils import calc_base_weights, get_topn_nodes, prune_nodes, get_class_label, calc_tag_probs
from file_parser import FileParser
from tree_node import Node

# Command-line layout: test file, boundary file, model file, sys-output file,
# then the three integer beam-search parameters.
TEST_FILE = sys.argv[1]
BOUNDARY_FILE = sys.argv[2]
MODEL_FILE = sys.argv[3]
SYS_OUTPUT = sys.argv[4]
config.BEAM_SIZE = int(sys.argv[5])
config.TOPN = int(sys.argv[6])
config.TOPK = int(sys.argv[7])

data = FileParser(TEST_FILE, BOUNDARY_FILE, MODEL_FILE)

# Beam search: seed each sentence with a BOS node and score the first position.
for sent_idx, sentence in enumerate(data.test_set):
    total_paths = {}
    data.final_probs[sent_idx] = {}
    # Sentence-initial state: BOS with no parent, no tag, zero score.
    start_node = Node('BOS', None, None, 0)
    history = copy.deepcopy(sentence[0][1])
    bos_bigram = 'prevTwoTags=BOS+BOS'
    bos_unigram = 'prevT=BOS'
    base_weights = calc_base_weights(data.weights, data.tagset, history)
    tag_dict = calc_tag_probs(data.weights, base_weights, bos_bigram, bos_unigram)
    candidates = get_topn_nodes(tag_dict, start_node)
    survivors = prune_nodes(candidates, total_paths)
    data.final_probs[sent_idx][0] = get_class_label(survivors)
def test_generate_fixed_width_file_invalid_columns(self):
    """A spec whose columns are invalid must be rejected at construction time."""
    bad_spec = "{folder}/invalid_columns_spec.json".format(
        folder=self.resource_folder)
    with self.assertRaises(ValueError):
        FileParser(spec_file=bad_spec)
def test_invalid_spec(self):
    """Constructing a parser from a missing spec file raises FileNotFoundError."""
    missing_spec = self.invalid_file
    with self.assertRaises(FileNotFoundError):
        FileParser(spec_file=missing_spec)
def __init__(self, filename):
    """Remember the source path and build a FileParser for it.

    The parser splits the file on the literal delimiter " et ".
    """
    self.filepath = filename
    self.file_parser = FileParser(filename, " et ")
parser.add_argument('--lines', help='Number of lines of sample data', default=10) parser.add_argument('--generate_fix_width_file', help='Generate fixed width file') parser.add_argument('--fix_width_file', help='Fixed width file for delimited file') parser.add_argument('--generate_delimited_file', help='Generate delimited file') parser.add_argument('--generate_csv_file', help='Generate CSV file') parser.add_argument('--csv_file', help='CSV file to hash') parser.add_argument('--hash_csv_file', help='Hashed CSV file') args = parser.parse_args() if args.type == 'fp': fp = FileParser(spec_file=args.spec_file) if args.generate_fix_width_file: if args.lines: fp.generate_fixed_width_file( fixed_width_file=args.generate_fix_width_file, lines_count=args.lines) else: fp.generate_fixed_width_file( fixed_width_file=args.generate_fix_width_file) elif args.generate_delimited_file: fp.generate_delimited_file(fixed_width_file=args.fix_width_file, delimited_file=args.generate_delimited_file) elif args.type == 'dp': dp = DataProcessor(spec_file=args.spec_file) if args.generate_csv_file: if args.lines:
from typing import List

from configuration.configuration_provider import ConfigurationProvider
from file_parser import FileParser
from generator.factory import GeneratorFactory
from model import Relation
from performance import performance
from sql_script_builder import SqlScriptBuilder

# Module-level collaborators shared by the pipeline steps below.
parser = FileParser()
configuration_provider = ConfigurationProvider()
generator_factory = GeneratorFactory()
builder = SqlScriptBuilder()


@performance
def parse(filepath: str) -> List[Relation]:
    """Read *filepath* and return the relations it describes."""
    return parser.parse(filepath)


@performance
def configure(relations: List[Relation]):
    """Attach a value generator to every column of every relation."""
    for relation in relations:
        for column in relation.columns.values():
            column_config = configuration_provider.provide(relation, column)
            column.generator = generator_factory.create(column, column_config)


@performance
def generate(relations: List[Relation]):
    """Emit the SQL script for *relations*."""
    builder.generate(relations)
from flask import Flask, jsonify
from flask import request
import argparse

from file_parser import FileParser

app = Flask(__name__)
# Default parser; setup_app() replaces it when a file is given on the CLI.
fileParser = FileParser('./data.txt')


def setup_app():
    """Parse CLI options, rebuild the shared FileParser, and start the server."""
    # Bug fix: without the global statement the assignment below created a
    # dead local and the routes kept serving the default './data.txt' parser.
    global fileParser
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', dest='txtFile', default='./data.txt',
                        help='text file with structured data to be parsed')
    args = parser.parse_args()
    fileParser = FileParser(args.txtFile)
    app.run(debug=True)


@app.route('/', methods=['GET'])
def index_route():
    """Simple liveness/info endpoint."""
    return jsonify({'about': 'python assignment'})


@app.route('/interface/<path:interface>', methods=['GET'])
def interface_route(interface):
    """Return one parsed interface, or all of them when the path is 'all'."""
    if interface == 'all':
        return jsonify(fileParser.getAllInterfaces())
    return jsonify(fileParser.getInterface(interface))