Example #1
def load_config_file(config_file, ignore_names):
    """ Loads and builds the model from the configuration

    Arguments:
        config_file: The configuration file
        ignore_names: A set of names that should be ignored during the loading.
    """
    config_dicts = parsing.parse_file(config_file)
    message("Configure file is parsed.")

    # Debugging aid (kept commented out): dump the parsed configuration blocks.
    # for k in config_dicts:
    #     print(k, config_dicts[k])

    if "main" not in config_dicts:
        raise Exception("Configuration does not contain the main block.")

    existing_objects = dict()

    main_config = config_dicts['main']

    configuration = dict()
    for key, value in main_config.items():
        if key not in ignore_names:
            try:
                configuration[key] = build_object(value, config_dicts,
                                                  existing_objects, 0)
            except Exception as exc:
                raise Exception("Can't parse key: {}".format(key)) from exc

    return configuration
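
A minimal usage sketch of the function above; the configuration file name and the ignored key are invented for illustration, not part of the original source:

# Hypothetical usage; "experiment.ini" and "output_dir" are assumptions.
config = load_config_file("experiment.ini", ignore_names={"output_dir"})
for key, obj in config.items():
    print(key, type(obj).__name__)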
Example #2
    def setUp(self):
        self.name = "Process tree unittest"
        self.rootdir = '../examples/1'
        self.ps_stats = parsing.parse_file(
            parsing.ParserState(), self.mk_fname('proc_ps.log')).ps_stats
        self.processtree = process_tree.ProcessTree(self.ps_stats,
                                                    None,
                                                    False,
                                                    for_testing=True)
Example #3
    def testparseProcDiskStatLog(self):
        state_with_headers = parsing.parse_file(writer, parsing.ParserState(),
                                                self.mk_fname('header'))
        state_with_headers.headers['system.cpu'] = 'xxx (2)'
        samples = parsing.parse_file(
            writer, state_with_headers,
            self.mk_fname('proc_diskstats.log')).disk_stats
        self.assertEqual(141, len(samples))

        for index, line in enumerate(
                open(self.mk_fname('extract.proc_diskstats.log'))):
            tokens = line.split('\t')
            sample = samples[index]
            if debug:
                print(line.rstrip())
                print(sample)
                print('-------------------')

            self.assertEqual(tokens[0], str(sample.time))
            self.assert_(floatEq(float(tokens[1]), sample.read))
            self.assert_(floatEq(float(tokens[2]), sample.write))
            self.assert_(floatEq(float(tokens[3]), sample.util))
Example #4
    def testparseProcDiskStatLog(self):
        state_with_headers = parsing.parse_file(parsing.ParserState(),
                                                self.mk_fname('header'))
        state_with_headers.headers['system.cpu'] = 'xxx (2)'
        samples = parsing.parse_file(
            state_with_headers, self.mk_fname('proc_diskstats.log')).disk_stats
        self.assertEqual(141, len(samples))

        for index, line in enumerate(
                open(self.mk_fname('extract.proc_diskstats.log'))):
            tokens = line.split('\t')
            sample = samples[index]
            if debug:
                print(line.rstrip())
                print(sample)
                print('-------------------')

            self.assertEqual(tokens[0], str(sample.time))
            self.assert_(floatEq(float(tokens[1]), sample.read))
            self.assert_(floatEq(float(tokens[2]), sample.write))
            self.assert_(floatEq(float(tokens[3]), sample.util))
Example #5
    def testparseProcStatLog(self):
        samples = parsing.parse_file(writer, parsing.ParserState(),
                                     self.mk_fname('proc_stat.log')).cpu_stats
        self.assertEqual(141, len(samples))

        for index, line in enumerate(
                open(self.mk_fname('extract.proc_stat.log'))):
            tokens = line.split('\t')
            sample = samples[index]
            if debug:
                print(line.rstrip())
                print(sample)
                print('-------------------')
            self.assert_(floatEq(float(tokens[0]), sample.time))
            self.assert_(floatEq(float(tokens[1]), sample.user))
            self.assert_(floatEq(float(tokens[2]), sample.sys))
            self.assert_(floatEq(float(tokens[3]), sample.io))
Example #6
    def testparseProcStatLog(self):
        samples = parsing.parse_file(parsing.ParserState(),
                                     self.mk_fname('proc_stat.log')).cpu_stats
        self.assertEqual(141, len(samples))

        for index, line in enumerate(
                open(self.mk_fname('extract.proc_stat.log'))):
            tokens = line.split('\t')
            sample = samples[index]
            if debug:
                print(line.rstrip())
                print(sample)
                print('-------------------')
            self.assert_(floatEq(float(tokens[0]), sample.time))
            self.assert_(floatEq(float(tokens[1]), sample.user))
            self.assert_(floatEq(float(tokens[2]), sample.sys))
            self.assert_(floatEq(float(tokens[3]), sample.io))
Example #7
    def testParseProcPsLog(self):
        state = parsing.parse_file(writer, parsing.ParserState(),
                                   self.mk_fname('proc_ps.log'))
        samples = state.ps_stats
        processes = samples.process_list
        sorted_processes = sorted(processes, key=lambda p: p.pid)

        for index, line in enumerate(
                open(self.mk_fname('extract2.proc_ps.log'))):
            tokens = line.split()
            process = sorted_processes[index]
            if debug:
                print(tokens[0:4])
                print(process.pid, process.cmd, process.ppid,
                      len(process.samples))
                print('-------------------')

            self.assertEqual(tokens[0], str(process.pid))
            self.assertEqual(tokens[1], str(process.cmd))
            self.assertEqual(tokens[2], str(process.ppid))
            self.assertEqual(tokens[3], str(len(process.samples)))
Example #8
    def testParseProcPsLog(self):
        state = parsing.parse_file(parsing.ParserState(),
                                   self.mk_fname('proc_ps.log'))
        samples = state.ps_stats
        processes = samples.process_list
        sorted_processes = sorted(processes, key=lambda p: p.pid)

        for index, line in enumerate(
                open(self.mk_fname('extract2.proc_ps.log'))):
            tokens = line.split()
            process = sorted_processes[index]
            if debug:
                print(tokens[0:4])
                print(process.pid, process.cmd, process.ppid,
                      len(process.samples))
                print('-------------------')

            self.assertEqual(tokens[0], str(process.pid))
            self.assertEqual(tokens[1], str(process.cmd))
            self.assertEqual(tokens[2], str(process.ppid))
            self.assertEqual(tokens[3], str(len(process.samples)))
Example #9
def load_plants():
    """Loads plants from plants into database."""

    for d in parse_file('pollinator-plants.txt'):
        # for debugging (may need this later)
        # if d.get('notes') is None:
        #     pdb.set_trace()
        # print(d['common_name'])
        plant = Plant(
            region=d['region'],
            plant_type=d['plant_type'],
            bloom_period=d['bloom_period'],
            common_name=d['common_name'],
            scientific_name=d['scientific_name'],
            life_cycle=d['life_cycle'],
            flower_color=d['flower_color'],
            max_height=d['max_height'],
            water_needs=d['water_needs'],
            notes=d['notes'],
        )

        db.session.add(plant)

    db.session.commit()
Example #10
    def test_parseTimedBlocks(self):
        state = parsing.parse_file(writer, parsing.ParserState(),
                                   self.mk_fname('proc_diskstats.log'))
        self.assertEqual(141, len(state.disk_stats))
Example #11
    def testParseHeader(self):
        state = parsing.parse_file(writer, parsing.ParserState(),
                                   self.mk_fname('header'))
        self.assertEqual(6, len(state.headers))
        self.assertEqual(2, parsing.get_num_cpus(state.headers))
Example #12
# well_index_config = WellIndexConfig(global_config.well_index_filename)

configs_to_parse = [completion_config,
                    prod_by_operated_day_config,
                    ]

# Instantiate WellManager, which acts as a store for all well data
well_manager = WellManager()
net_factory = NeuralNetFactory()


# Keep track of incomplete wells to prune after parsing all files
skipped_apis = set()

# Parse files
for c in configs_to_parse:
    skipped_apis.update(parse_file(c, well_manager, global_config.data_directory))

for s in skipped_apis:
    well_manager.remove_well(s)

print("{0} skipped, {1} remaining, {2} parsed".format(len(skipped_apis), len(well_manager.get_apis()), len(skipped_apis) + len(well_manager.get_apis())))



# Train and test multiple nets
all_runs = [
    RunParams("single", {"neural_net_type": NeuralNetTypes.Basic,
                         "num_neurons_per_layer": 2}),
    RunParams("batch_normalized",
              {"neural_net_type": NeuralNetTypes.BatchNormalized}),
    RunParams("dual_layer_4", {"neural_net_type": NeuralNetTypes.Dual_Layer_Basic,
                               "num_neurons_per_layer": 8}),
    RunParams("dual_layer_8", {"neural_net_type": NeuralNetTypes.Dual_Layer_Basic,
                               "num_neurons_per_layer": 4}),
]
Example #13
def fetch(dataset):
    return extraction.humanize_names(parsing.parse_file(f"../results/{dataset}"))
Example #14
def calcolate_best(filename):
    data = parse_file(filename)
Example #15
import math
import random
import re
from xml.dom.minidom import parse, parseString
import xml.dom.minidom as minidom
from collections import OrderedDict,defaultdict
import pickle
from sets import Set
from operator import itemgetter
import os
import sys
import nltk
from nltk.tokenize.regexp import RegexpTokenizer
import time
import Queue

import parsing
import indexing



if __name__ == "__main__":
        if len(sys.argv)!=2:                # Expect exactly 1 argument
                sys.exit(2)

        path1 = sys.argv[1]
        
        parsing.parse_file(path1)

        indexing.create_index()

Example #16
        if match_res is not None:
            or_node = OR()
            tree.append(or_node)
            nodes = populate(rule.antecedent(), match_res)
            if isinstance(nodes, str):
                nodes = [nodes]
            for i in range(len(nodes)):
                nodes[i] = backchain_to_goal_tree(rules, nodes[i])
            or_node.append(nodes)

    tree = simplify(tree)
    return tree


def apply_forward_chain(rules, facts):
    init_facts = facts
    final_facts = forward_chain(rules, facts)
    print("input facts: {}".format(init_facts))
    print("consequent facts: {}".format(set(final_facts) - set(init_facts)))


facts, rules = parse_file("data.txt")
# backward chain
pretty_goal_tree(backchain_to_goal_tree(rules, 'гость - Тралл'))
# pretty_goal_tree(backchain_to_goal_tree(rules, 'гость - Нелтарион'))
# pretty_goal_tree(backchain_to_goal_tree(rules, 'гость - Вол\'Джин'))
# forward chain
# apply_forward_chain(rules, [facts['F18'], facts['F25'], facts['F43']])
# apply_forward_chain(rules, [facts['F23'], facts['F24'], facts['F43']])
# apply_forward_chain(rules, [facts['F19'], facts['F33'], facts['F35'], facts['F49'], facts['F44']])
Example #17
import argparse
from algorithm import algorithm
from parsing import parse_file


def parse_args():
    parser = argparse.ArgumentParser(description='Solver of N-Puzzle')
    parser.add_argument("-f", "--file", type=str, required=True,
                        help="path of n-puzzle file")
    parser.add_argument("-l", "--linear", action="store_true",
                        help="resolve N-Puzzle lineary")
    parser.add_argument("-he", "--heuristic", choices=["euclidian", "square_euclidian", "manhattan", "gaschnig", "linear_conflict", "hamming"], default="manhattan",
                        help="heuristic function used Default: manhattan")
    parser.add_argument("-a", "--algorithm", choices=["a*", "greedy", "uniform cost"], default="a*",
                        help="search algorithm used Default: a*")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help='display path to final puzzle')
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    puzzle = parse_file(args.file, args.linear)
    algorithm(puzzle, args.linear, args.verbose, args.heuristic, args.algorithm)
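
A hedged smoke-test sketch for the CLI above; the script name and puzzle path are invented for illustration:

import sys

# Hypothetical check that parse_args() wires the flags up; "solver.py" and
# "puzzles/3x3.txt" are assumptions, not from the source.
sys.argv = ["solver.py", "-f", "puzzles/3x3.txt", "-he", "manhattan"]
args = parse_args()
assert args.heuristic == "manhattan" and not args.linear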
Example #18
def load_results(files, names=None):
    if names is None:
        names = [f"{os.path.basename(name)} time" for name in files]
    return join_results([parsing.parse_file(f) for f in files], names)
Example #19
    def setUp(self):
        self.name = "Process tree unittest"
        self.rootdir = '../examples/1'
        self.ps_stats = parsing.parse_file(
            parsing.ParserState(), self.mk_fname('proc_ps.log')).ps_stats
        self.processtree = process_tree.ProcessTree(self.ps_stats, None, False,
                                                    for_testing=True)
Example #20
    def testParseHeader(self):
        state = parsing.parse_file(parsing.ParserState(),
                                   self.mk_fname('header'))
        self.assertEqual(6, len(state.headers))
        self.assertEqual(2, parsing.get_num_cpus(state.headers))
Example #21
    def test_parseTimedBlocks(self):
        state = parsing.parse_file(parsing.ParserState(),
                                   self.mk_fname('proc_diskstats.log'))
        self.assertEqual(141, len(state.disk_stats))
Example #22
    fig.legend(handles, labels, loc='upper right')
    fig.tight_layout()
    # Magic formula for providing space for the legend
    fig.subplots_adjust(top=0.875, right=0.925)
    fig.savefig(filename)
    plt.close(fig)


if __name__ == '__main__':
    if len(sys.argv) != 5:
        print(
            "Usage: [swizzleflow times] [swizzleflow times loads only] [swizzle inventor times] [prefix]"
        )
        sys.exit(1)
    _, swflow_file, swflow_loads_file, swinv_file, prefix = sys.argv
    swflow_data = parsing.parse_file(swflow_file)
    swflow_load_data = parsing.parse_file(swflow_loads_file)
    swinv_data = parsing.parse_swizzle_inventor_file(swinv_file)

    details, comparison = process_data(swflow_data, swflow_load_data,
                                       swinv_data)

    details = to_plot_df(details)
    comparison = to_plot_df(comparison)

    details.to_csv(f"{prefix}-details.csv", index=False)
    comparison.to_csv(f"{prefix}-comparison.csv", index=False)

    plot(details, False, "Synthesis time breakdown", f"{prefix}-details.pdf")
    plot(details, True, "Synthesis time breakdown (log)",
         f"{prefix}-details-log.pdf")
Example #23
def generate_allocations(projects, services):
    gas = []
    for project in projects:
        gas.append(generate_allocation_project(project, services))
    return gas


def generate_output(allocations, data):
    for allocation in allocations:
        output_list = []
        for a in allocation['allocation']:
            names = [p["name"] for p in data['providers']]
            provider = a['provider']
            regions = [p for p in data['providers']
                       if p['name'] == provider][0]['regions']
            rnames = [r['name'] for r in regions]
            idx, n, allocated = (names.index(a['provider']),
                                 rnames.index(a['name']),
                                 a['allocated'])
            output_list.append(str(idx))
            output_list.append(str(n))
            output_list.append(str(allocated))
        print(' '.join(output_list))


from parsing import parse_file

if __name__ == '__main__':
    data = parse_file('inputs/test.in')
    projects = data['projects']
    providers = data['providers']
    services = get_services(providers)
    gal = generate_allocations(projects, services)
    print('----', gal)