Example #1
0
def file_process(argv):
    """Replay a recorded data file through the trained model, sweeping the
    anomaly parameters (c, K), and write a false-positive report as CSV.

    argv[0] -- path of the input CSV file (lines: label,x,y,z)
    argv[1] -- recorded-time tag copied verbatim into the report
    """
    # Load the trained presentation-model features from disk.
    p_model = PresentationModel(PresentationModel.TRAINING_MODEL_FILE)

    analog_data = AnalogData(Parser.PAGESIZE)

    print('>> Start to receive data from FILE...')

    # BUG FIX: `columns` was given as a set literal, which has no defined
    # order; use a list so the frame's column order is deterministic.
    df = pd.DataFrame(
        columns=['delta_t', 'K', 'recorded_time', 'false_positive_ratio'])
    EVENT = 2485  # number of input lines to replay per parameter pair
    for c in [1, 5, 10, 15, 20, 25, 30, 35, 40]:
        for K in [10, 15, 20, 25, 30]:
            CONTINOUS_ANOMALY = c  # consecutive anomalies needed for a warning
            count = 0
            K /= 10.0  # scale raw K (10..30) into the model's range (1.0..3.0)
            line_number = 0
            warning_count = 0
            # BUG FIX: the file handle was opened once per (c, K) pair and
            # never closed; a context manager guarantees closure.
            with open(argv[0], 'r') as fp:
                for file_line in fp:
                    line_number += 1
                    if line_number > EVENT:
                        break
                    line = file_line.split(',')
                    data = [float(val) for val in line[1:]]

                    if len(data) != 3:
                        continue
                    analog_data.add(data)
                    data_list = analog_data.merge_to_list()
                    real_data = p_model.pca_combine(data_list)
                    peak_ave = Parser.find_peaks_sorted(real_data)
                    valley_ave = Parser.find_valley_sorted(real_data)
                    gap = np.mean(peak_ave) - np.mean(valley_ave)

                    # Only start predicting once a full page of samples is in.
                    if line_number >= Parser.PAGESIZE and p_model.predict(
                            gap, K) != 0:
                        count += 1
                        if count >= CONTINOUS_ANOMALY:
                            warning_count += 1
                    else:
                        count = 0
            delta_t = c / 20  # presumably c samples at a fixed rate -- TODO confirm units
            rec_time = argv[1]
            fp_ratio = warning_count / (EVENT - Parser.PAGESIZE) * 100
            df.loc[df.shape[0]] = {
                'delta_t': delta_t,
                'recorded_time': rec_time,
                'false_positive_ratio': fp_ratio,
                'K': K,
            }
            print(delta_t, rec_time, fp_ratio, K)
    df = df[['recorded_time', 'delta_t', 'K', 'false_positive_ratio']]
    print(df)
    df.to_csv(argv[0][:-4] + '_res.csv', index=False)
Example #2
0
def main():
    """Entry point: evaluate a command-line arithmetic expression with
    ParenTree.

    With the single argument ``test`` the built-in test suite runs instead;
    any other argument is first looked up in ``test_strings`` and otherwise
    treated as a raw expression.
    """
    parser = Parser()
    paren_tree = ParenTree()

    # Join all CLI words so spaced expressions work without shell quoting.
    arg = ''.join(sys.argv[1:])

    if arg == 'test':
        result = test()
        demo('Test results:', result)
    else:
        # Named test expression if known, otherwise the literal argument.
        input_expr = test_strings[arg] if test_strings.get(arg) else arg

        demo('RAW INPUT', input_expr)

        char_list = parser.parse_expression(input_expr)

        # BUG FIX: removed the unused `python_result = eval(...)` line --
        # its value was never read, and eval() on raw CLI input is a
        # needless code-execution risk.
        paren_tree.parse_char_list(char_list)

        result = round(paren_tree.evaluate(), RESULT_ROUND_TO)

        demo('RESULT', result)
Example #3
0
def main(filename):
	"""Solve both parts of the intcode puzzle stored in *filename*.

	Part 1 runs the program with noun=12, verb=2; part 2 brute-forces the
	noun/verb pair that makes the program output 19690720.
	"""
	with open(filename, 'r') as f:
		data = f.readlines()[0].strip() # only interested in one line

	# Part 1: patch the program with the prescribed noun/verb.
	op_codes = data.split(",")
	op_codes[1] = "12"
	op_codes[2] = "2"

	new_state = Parser().parse_string(",".join(op_codes))
	print(f"Answer for part 1: {new_state.split(',')[0]}")

	# Part 2: noun and verb may each be anywhere in 0..99 inclusive.
	# BUG FIX: range(99) stops at 98 and silently skipped the value 99,
	# so any solution involving 99 was never found.
	answer = None
	for i in range(100):
		for j in range(100):
			op_codes = data.split(",")
			op_codes[1] = str(i)
			op_codes[2] = str(j)
			new_state = Parser().parse_string(",".join(op_codes))
			result, noun, verb = new_state.split(",")[:3]
			if int(result) == 19690720:
				answer = str(100 * int(noun) + int(verb))
				break
		else:
			continue  # inner loop exhausted -> keep searching
		break  # inner loop broke -> answer found
	if answer is not None:
		print(f"Answer for part 2: {answer}")
	else:
		print("Failed to find an answer for part 2.")
Example #4
0
def test():
    """Run every expression in ``test_strings`` through ParenTree, compare
    with Python's own evaluation, and return a human-readable summary."""
    successes = []
    failures = []
    for key in test_strings:
        parser = Parser()
        paren_tree = ParenTree()

        char_list = parser.parse_expression(test_strings[key])

        # Reference result straight from Python's evaluator.
        # NOTE(review): eval() is acceptable here because test_strings is
        # trusted fixture data, not user input.
        python_result = round(eval(''.join(char_list)), RESULT_ROUND_TO)

        paren_tree.parse_char_list(char_list)

        test_result = round(paren_tree.evaluate(), RESULT_ROUND_TO)

        # FIX: removed leftover debug prints (`print(python_result)`,
        # `print(fail)`) and commented-out code; the summary string is the
        # function's sole output.
        if test_result == python_result:
            successes.append(test_strings[key] + ' = ' + str(test_result))
        else:
            failures.append({
                'key': key,
                'expression': test_strings[key],
                'test_result': test_result,
                'python_result': python_result
            })

    message = ''

    if failures:
        message += f'{len(failures)} FAILURES:\n\n'
        for fail in failures:
            message += f"key:           {fail['key']}\n"
            message += f"expression:    {fail['expression']}\n"
            message += f"test result:   {fail['test_result']}\n"
            message += f"python result: {fail['python_result']}\n\n"

    if successes:
        message += f'{len(successes)} SUCCESSES:\n\n'
        for s in successes:
            message += s + '\n\n'

    return message
    def test_read_with_nan_values(self):
        """parse() converts numeric strings to floats but passes the
        literal string 'nan' through unchanged."""
        raw_rows = [
            ['37.454012', '95.071431', '73.199394', '59.865848', 'nan'],
            ['15.599452', '5.808361', '86.617615', '60.111501', '70.807258'],
            ['2.058449', '96.990985', 'nan', '21.233911', '18.182497'],
            ['nan', '30.424224', '52.475643', '43.194502', '29.122914'],
            ['61.185289', '13.949386', '29.214465', 'nan', '45.606998'],
        ]
        expected = [
            [37.454012, 95.071431, 73.199394, 59.865848, 'nan'],
            [15.599452, 5.808361, 86.617615, 60.111501, 70.807258],
            [2.058449, 96.990985, 'nan', 21.233911, 18.182497],
            ['nan', 30.424224, 52.475643, 43.194502, 29.122914],
            [61.185289, 13.949386, 29.214465, 'nan', 45.606998],
        ]

        actual = Parser().parse(raw_rows)

        self.assertEqual(expected, actual)
Example #6
0
 def startDownload(self):
     """Collect image URLs from the URL field and launch one download
     thread per image."""
     image_urls = Parser.Get_IMG_Urls(self.leURL.text())
     self.lwMain.clear()
     # Grid cursor for placing incoming images.
     self.i = 0
     self.j = 0
     self.threads = []
     for image_url in image_urls:
         worker = DownloadThread(image_url)
         worker.data_downloaded.connect(self.on_data_ready)
         # Keep a reference so the thread is not garbage-collected mid-run.
         self.threads.append(worker)
         worker.start()
    def test_run(self):
        """End-to-end: Program reads the CSV, interpolates, and writes a
        file identical to the pre-computed expected output."""
        base = os.path.join(os.getcwd(), 'integration/example_data')
        input_file = os.path.join(base, 'input_test_data.csv')
        output_file = os.path.join(base, 'interpolated_data.csv')
        expected_file = os.path.join(base, 'interpolated_test_data.csv')

        program = Program()
        program.transport = FileTransport(input_file, output_file)
        program.parser = Parser()
        program.interpolator = Interpolator()

        program.run()

        self.assertTrue(filecmp.cmp(expected_file, output_file))
Example #8
0
class Program:
    """Pipeline wiring: read raw input, parse it, interpolate, write out."""

    def __init__(self):
        # Collaborators are plain attributes so tests can swap them out.
        self.transport = InputTransport()
        self.parser = Parser()
        self.interpolator = Interpolator()

    def run(self):
        """Execute the pipeline once; malformed matrices are reported
        through the transport instead of crashing."""
        try:
            raw = self.transport.read()
            parsed = self.parser.parse(raw)
            interpolated = self.interpolator.interpolate(parsed)
        except ValueError:
            # Parsing/interpolation signal bad input via ValueError.
            self.transport.write('Invalid matrix!')
        else:
            self.transport.write(interpolated)
Example #9
0
#! /usr/bin/env python

import io
import json
from jinja2 import Template
from lib import Parser
from config import template_vars

# Collect translatable strings from every rendered source page into one dict.
src_html = ["src/en/index.html", "src/ch/index.html", "src/faq/index.html"]
strings = dict()
for fname in src_html:
    with io.open(fname, encoding="utf-8") as f:
        p = Parser(strings)
        p.feed(Template(f.read()).render(template_vars))
        p.close()

# BUG FIX: dropped the Python-2-only builtin `unicode(...)`, which raises
# NameError on Python 3; json.dumps(..., ensure_ascii=False) already
# returns text, so it can be written directly.
with io.open("lang/en_US.json", "w", encoding="utf-8") as f:
    f.write(
        json.dumps(strings,
                   ensure_ascii=False,
                   sort_keys=True,
                   indent=0,
                   separators=(',', ': ')))
Example #10
0
 def __init__(self):
     """Wire the default collaborators; tests may replace any of them."""
     self.interpolator = Interpolator()
     self.parser = Parser()
     self.transport = InputTransport()
Example #11
0
 def test_part1_1(self):
     """Opcode 1 (add) on the sample program halts as 2,0,0,0,99."""
     program = "1,0,0,0,99"
     self.assertEqual(Parser().parse_string(program), "2,0,0,0,99")
Example #12
0
 def test_part1_2(self):
     """Opcode 2 (multiply) on the sample program halts as 2,3,0,6,99."""
     program = "2,3,0,3,99"
     self.assertEqual(Parser().parse_string(program), "2,3,0,6,99")
Example #13
0
 def test_part1_4(self):
     """Self-modifying sample: the program rewrites its own halt opcode."""
     program = "1,1,1,4,99,5,6,0,99"
     self.assertEqual(Parser().parse_string(program), "30,1,1,4,2,5,6,0,99")
Example #14
0
 def test_part1_5(self):
     """Longer mixed add/multiply sample from the puzzle statement."""
     program = "1,9,10,3,2,3,11,0,99,30,40,50"
     self.assertEqual(Parser().parse_string(program),
                      "3500,9,10,70,2,3,11,0,99,30,40,50")
Example #15
0
from lib import Parser

# call in main method in testdata
# TEST_DATA_PARSED_XML = Parser.parse(test_data.xml')

# run app.py
# NOTE(review): this parse runs at import time, so merely importing the
# module performs file I/O on ./test/test_data.xml -- confirm intended.
TEST_DATA_PARSED_XML = Parser.parse('./test/test_data.xml')

# Minimal overview fixture; `content` is intentionally empty in each entry.
TEST_DATA_OVERVIEW = [
    {'id': 1, 'title': 'Data 1', 'comment': 'comment 1', 'content': ''},
    {'id': 2, 'title': 'Data 2', 'comment': 'comment 2', 'content': ''},
    {'id': 3, 'title': 'Data 3', 'comment': 'comment 3', 'content': ''}
]


if __name__ == '__main__':

    print('data = {}'.format(TEST_DATA_PARSED_XML))
Example #16
0
 def test_part1_3(self):
     """Multiply sample: the final cell becomes 99 * 99 = 9801."""
     program = "2,4,4,5,99,0"
     self.assertEqual(Parser().parse_string(program), "2,4,4,5,99,9801")
Example #17
0
def gis(arg):
    """Load GIS data from JSON and launch the GIS view.

    NOTE(review): the *arg* parameter is never used; the input path is
    read from the module-level ``args.input`` instead -- confirm intended.
    """
    parse_GIS = Parser(args.input)
    dict_gis = parse_GIS.load_json()

    obj = GIS(dict_gis)
    obj.init()
Example #18
0
    # NOTE(review): this chunk begins mid-function -- `args`, N_GRAMS, CORPUS,
    # MAX_F, MIN_F, TMP, `unigram_frequencies`, `persist`, Preprocessor,
    # Parser and Streamer are all defined outside this view.
    # (Python 2 syntax: print statements.)
    FLUSHING_RATIO = args["flush"]
    N_DOCS = args["ndocs"]
    CONFIDENCE = args["smooth"]  # NOTE(review): never read below -- confirm use
    SILENT = args["silent"]  # NOTE(review): never read below -- confirm use

    curr = N_GRAMS
    WORK = CORPUS
    preprocessor = Preprocessor()

    print "Starting preprocessing..."
    unigram_f, freqBand = unigram_frequencies(preprocessor, WORK, MAX_F, N_DOCS)

    # Build multiword n-grams from the largest n down to bigrams; each pass
    # rewrites the working corpus so the next (smaller) n sees merged tokens.
    while curr > 1:

        print "ngrams of n=%d\nngrams of n=%d: parsing...\nngrams of n=%d: starting parser..." % (curr, curr, curr)
        parser = Parser(curr, MIN_F, unigram_f, freqBand)
        print "ngrams of n=%d: started!\nngrams of n=%d: parsing input from stream..." % (curr, curr)
        streamer = Streamer(WORK, n=N_DOCS)
        for line in streamer:
            # Raw corpus lines are preprocessed first; later passes consume
            # already-processed snapshots.
            if WORK == CORPUS:
                parser(preprocessor(line), flush_at=FLUSHING_RATIO)
            else:
                parser(line, flush_at=FLUSHING_RATIO)
        print "ngrams of n=%d: finished parsing." % curr
        print "ngrams of n=%d: rewriting corpus with multiwords..." % curr
        parser.rewrite()

        print "ngrams of n=%d: storing temporary snapshot of the data for feedback..." % curr
        WORK = persist(TMP, parser)  # snapshot becomes the next pass's input
        print "ngrams of n=%d: done!" % curr
        curr -= 1
Example #19
0
def gis(arg):
	"""Load GIS data from JSON and launch the GIS view.

	NOTE(review): the *arg* parameter is never used; the input path is
	read from the module-level ``args.input`` instead -- confirm intended.
	"""
	parse_GIS = Parser(args.input)
	dict_gis = parse_GIS.load_json()

	obj = GIS(dict_gis)
	obj.init()
Example #20
0
 def test_construct(self):
     """A bare Parser() can be instantiated."""
     self.assertIsNotNone(Parser())
Example #21
0
 def get_command(self):
     """Read the pending HTTP response and return its parsed structure."""
     raw_response = self.http.response()
     return Parser.Parser().parse_struct(raw_response)
Example #22
0
 def GetEmail(self):
     """Return the e-mail addresses parsed from the search results.

     FIX: stripped the redundant double parentheses that wrapped the
     assignment target, the constructor call, and the return value --
     they obscured a plain assignment and return.
     """
     find_email = Parser.MyParser(self.Results,
                                  self.UrlCheck(self.Keyword))
     return find_email.Emails()
Example #23
0
 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"
 xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/"
 xmlns:cwmp="urn:dslforum-org:cwmp-1-1">
<soap:Header>
</soap:Header>
<soap:Body>
<cwmp:GetParameterNames>
<ParameterPath>InternetGatewayDevice.DeviceInfo.Hello</ParameterPath>
<NextLevel>false</NextLevel>
</cwmp:GetParameterNames>
</soap:Body>
</soap:Envelope>
"""

# NOTE(review): `xml` is the tail of a SOAP-envelope string literal whose
# opening triple quote lies outside this view.
parse=Parser.Parser()
commands_server=parse.parse_struct(xml)

# Find the GetParameterNames command and show its parameter path.
for command in commands_server.keys():
  if re.search(r'getparameternames',command.lower()):
        print (command)
        params=commands_server[command]

        print (params['ParameterPath'])

    
    


Example #24
0
if __name__ == "__main__":

    start = time.time()  # NOTE(review): never read in this chunk -- confirm use
    resultpath = dirinit()

    # Read and validate the configuration / input files, then run collection.
    config_files = InputParser()
    config_files.check_input_files()
    config_files.input_parser()
    run()

    # FIX: removed the unused `coll` variable -- the str(len(...)) result
    # was never read anywhere.
    with tqdm(
        total=1,
        file=sys.stdout,
        desc="Parsing switches Information",
        ascii=True,
        bar_format="{desc:37}: {percentage:3.0f}%|{bar}|",
    ) as pbar:
        parser = Parser()
        data = parser.file_parse()
        pbar.update()

    with tqdm(
        total=1,
        file=sys.stdout,
        # NOTE(review): "Genrating" looks like a typo, but it is a
        # user-visible string so it is preserved byte-for-byte here.
        desc="Genrating report",
        ascii=True,
        bar_format="{desc:37}: {percentage:3.0f}%|{bar}|",
    ) as pbar:
        parser.reporter(data)
        pbar.update()

    dirend()
    print("Reports have been generated in the following path : %s" % resultpath)
Example #25
0
                        # NOTE(review): chunk starts inside a
                        # parser.add_argument(...) call whose opening lies
                        # outside this view.
                        type=str,
                        help='Pie plot title',
                        required=False)

    args = parser.parse_args()

    # Optional GIS mode runs before the chart dispatch.
    if args.gis:
        gis(args.gis)

    # NOTE(review): prefer `args.title is None`; `== None` works here but
    # is unidiomatic.
    if args.type == 'Pie' and args.title == None:
        parser.error(
            'If you want a Pie plot, you also have to set a title with -title argument'
        )

    # Bar chart: 2D vs default renderer, image vs GUI output.
    if args.type == "bar":
        parse_bar = Parser.ParseBar(args.input)
        dict_bar = parse_bar.process_data()

        if args.dimension == "2D":
            bar = Bar.Bar2D(dict_bar)
        else:
            bar = Bar.Bar(dict_bar)

        if args.view == "image":
            bar.init("image")
        else:
            bar.init("gui")

    # Scatter branch (continues beyond this view).
    elif args.type == "scatter":
        parse_scatter = Parser.ParseScatter(args.input)
        dict_scatter = parse_scatter.process_data()