Example #1
def dirTraverse_DFS(path, step, stop):
    # depth-first walk: visit each entry, recurse into subdirectories
    if os.path.exists(path):
        curlist = os.listdir(path)
        curlist.sort()
        # print(curlist)
        for item in curlist:
            item_path = path + "/" + item
            print(item_path)
            stop += 1
            commands = run(stmt_file_path, item_path, step, stop)
            runFdsm(commands, item_path)
            if os.path.isdir(item_path):
                dirTraverse_DFS(item_path, step, stop)
            step += 1
        commands = run(stmt_file_path, "", step, stop)
        # print(commands)
        runFdsm(commands, "")
Example #3
def dirTraverse_BFS(rootDir):
    # breadth-first walk driven by a FIFO queue of directories
    step = 0
    stop = 0
    dirlist = queue.Queue()
    dirlist.put(rootDir)
    while not dirlist.empty():
        fullpathname = dirlist.get()
        if os.path.exists(fullpathname):
            curlist = os.listdir(fullpathname)
            curlist.sort()
            stop += len(curlist)
            for fdname in curlist:
                item_path = fullpathname + "/" + fdname
                if os.path.isdir(item_path):
                    dirlist.put(item_path)
                print(item_path)
                commands = run(stmt_file_path, item_path, step, stop)
                # print(commands)
                runFdsm(commands, item_path)
                step += 1
    commands = run(stmt_file_path, "", step, stop)
    # print(commands)
    runFdsm(commands, "")
Example #5
def parse():
    input = request.form.get('input')
    if input is None:
        return 'key: input not found in body'

    conversionFrom = request.form.get('conversionFrom')
    if conversionFrom is None:
        return 'key: conversionFrom not found in body'

    conversionTo = request.form.get('conversionTo')
    if conversionTo is None:
        return 'key: conversionTo not found in body'

    conversion = conversionFrom + '-' + conversionTo

    try:
        return parser.run(conversion, input)
    except ValueError as e:
        return e.args[0], 400
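A quick way to exercise this handler is Flask's test client; a sketch assuming the function is registered as a POST route at /parse (the route decorator and the app object are not shown in the excerpt, and the conversion names are illustrative):

# hypothetical harness; assumes app = Flask(__name__) and
# @app.route('/parse', methods=['POST']) above def parse()
with app.test_client() as client:
    resp = client.post('/parse', data={
        'input': '42',
        'conversionFrom': 'dec',   # illustrative conversion names
        'conversionTo': 'hex',
    })
    print(resp.status_code, resp.get_data(as_text=True))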
Example #6
def extractionSpecification(files):
    _authenticate()
    for f in files:
        xspec = parser.run('data/' + f + '.xs')

        # create context, save context id
        cm = xspec['header']['Concept Model']
        oid = xspec['header']['OID']

        ctx = storage.persistent(xspec['header']['Context Name'].lower() + '.ctx')

        for de, xss in xspec['entries'].items():
            for _xs in xss:
                _xs['de'] = ctx['demap'][de]
                _xs['concept'] = cm
                _xs['oid'] = oid

                xs.save(_xs)

        storage.persistent(f + '.xs', xspec)
Example #7
def parse(self, key, callback_url, api_file=None):
    import parser
    notify = Notify(self.request.id, callback_url)
    try:
        result = parser.run(key, notify, api_file=api_file)
        api_post.apply_async(kwargs={'url': callback_url, 'retry': True}, countdown=1)
        return result
    except SoftTimeLimitExceeded as e:
        notify.send('Timeout, stop job')
        api_parse_collect(countdown=2)
        raise e
    except (BotoClientError, BotoServerError) as e:
        msg = 'Sorry, an error occurred, will try to repeat after 3 minutes.'
        notify.send(msg)
        notify.admin_send('AWS ERROR: %s' % e)
        log.error(msg)
        raise self.retry(exc=e, countdown=60*2)
    except Exception as e:
        log.critical(e)
        notify.admin_send('parse raised: %s' % e)
        notify.send('Sorry, parser terminated with error, '
                    'we will fix it and reply to you, thanks!')
        api_parse_collect(countdown=2)
        raise e
Example #8
def run():
	common.config.init()
	common.models.init()
	
	parser.run()
	server.run()
Example #9
def context(files):
    _authenticate()
    for f in files:
        context = parser.run('data/' + f + '.ctx')

        # create context, save context id
        res = ctx.save(context['header']['Context Name'], context['header']['Context Definition'])
        context['id'] = res['id']

        # create object classes, save object class id
        for _, _oc in context['ocs'].items():
            _oc['context'] = context['id']
            res = oc.save(_oc)
            _oc['id'] = res['id']

        # create data types
        for _, _dt in context['dts'].items():
            dt.save(_dt)

        # create conceptual domains, update
        # in the case of name collision
        # then create value domains
        context['vds'] = {}
        for _, _cd in context['cds'].items():
            existing = cd.get({
                'name': _cd['name']
            })

            if existing:
                _cd['id'] = existing['id']
                cd.update(_cd)
            else:
                res = cd.save(_cd)
                _cd['id'] = res['id']

            _vd = {
                'cd': _cd['id'],
                'name': _cd['name'] + ':' + _cd['dt'],
                'context': context['id'],
                'dt': context['dts'][_cd['dt']],
                'enumerated': _cd['enumerated'],
                'pvs': []
            }
            res = vd.save(_vd)
            _vd['id'] = res['id']
            context['vds'][_cd['name']] = _vd

        # create data element concepts
        # and data elements
        context['demap'] = {}
        for _, _oc in context['ocs'].items():
            for _de in _oc['properties']:
                _de['cd'] = context['cds'][_de['cd']]
                _de['context'] = context['id']
                res = dec.save(_de, _oc)
                _de['dec'] = res['id']

                _de['vd'] = context['vds'][_de['cd']['name']]
                res = de.save(_de)
                _de['id']['remote'] = res['id']

                context['demap'][_de['id']['local']] = _de['id']['remote']

        storage.persistent(f + '.ctx', context)
Example #10
def train_dev(word_ids, tag_ids, head_ids, rel_ids, indices, isTrain):
    losses = []
    tot_tokens = 0
    tot_cor_arc = 0
    tot_cor_rel = 0

    step = 0
    parser._pdrop_embs = config.pdrop_embs * isTrain
    parser._pdrop_lstm = config.pdrop_lstm * isTrain
    parser._pdrop_mlp = config.pdrop_mlp * isTrain
    parser.embd_mask_generator(parser._pdrop_embs, indices)

    sent_ids = [i for i in range(len(word_ids))]

    if config.small_model:
        num_sents = 10000 if isTrain else 10
        sent_ids = sent_ids[:num_sents]

    if isTrain:
        np.random.shuffle(sent_ids)

    # for seq_w, seq_t, seq_h, seq_r in zip(word_ids, tag_ids, head_ids, rel_ids):
    for sent_id in sent_ids:

        seq_w, seq_t, seq_h, seq_r, masks_w, masks_t = word_ids[sent_id], tag_ids[sent_id], \
                                                       head_ids[sent_id], rel_ids[sent_id], \
                                                       parser._masks_w[sent_id], parser._masks_t[sent_id]

        # if step % config.batch_size == 0 or not isTrain:
        if not isTrain:
            dy.renew_cg()

        loss, num_cor_arc, num_cor_rel = parser.run(seq_w, seq_t, seq_h, seq_r,
                                                    masks_w, masks_t, isTrain)
        losses.append(dy.sum_batches(loss))

        punct_count = 0

        for r in seq_r:
            if r == parser._punct_id:
                punct_count += 1

        tot_tokens += len(seq_w) - punct_count
        tot_cor_arc += num_cor_arc
        tot_cor_rel += num_cor_rel

        step += 1

        if (step % config.batch_size == 0
                or step == len(word_ids) - 1) and isTrain:
            # print(step, "\t/\t", len(sent_ids), flush=True)
            losses = dy.esum(losses)
            losses_value_arc = losses.value()
            losses.backward()
            # parser._trainer.update()
            parser.update_parameters()
            if step == len(word_ids) - 1:
                print(losses_value_arc)
            losses = []
            dy.renew_cg()
            if config.adam:
                parser._global_step += 1

        if (not isTrain) and step == len(sent_ids) - 1:
            score = (tot_cor_arc / tot_tokens)
            score_label = (tot_cor_rel / tot_tokens)
            print(score)
            print(score_label)
            if score > parser._best_score:
                parser._update = True
                parser._early_stop_count = 0
                parser._best_score = score

            if score_label > parser._best_score_las:
                parser._best_score_las = score_label

            print(parser._best_score)
            print(parser._best_score_las)

            if not config.adam:
                parser._global_step += 1
Example #11
# from pydriller import *
from git import *
import os
import shutil
import stat
import csv
import re
import subprocess
import SetData as sd
import parser as pa

pa.run()

filename, file_dict, file_hash = sd.InitData()

cnt = 0

# File is cloned based on url.
with open(filename, 'r') as txtinput:
    all_lines = txtinput.readlines()
    all_num = len(all_lines)

    for url_data in all_lines:
        cnt += 1
        try:
            if cnt == all_num:
                Git('clone_data').clone(url_data)
            else:
                url_data = url_data[:len(url_data) - 1]
                Git('clone_data').clone(url_data)
        except Exception as e:
            print(e)  # handler body assumed; the original excerpt is truncated here
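For context, Git('clone_data').clone(url) is GitPython's command wrapper: it runs git clone <url> with clone_data as the working directory. A minimal sketch of the same loop without the counter bookkeeping (the clone_data directory is assumed to exist):

from git import Git

with open(filename) as txtinput:
    for url in txtinput:
        url = url.strip()  # drop the trailing newline instead of slicing by length
        try:
            Git('clone_data').clone(url)
        except Exception as e:
            print(e)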
Example #12
File: app.py  Project: LuisRobSan/aeries
def get_grades(self):
    conn = connect("grades.db")  # connect presumably comes from sqlite3; the excerpt omits the import
    c = conn.cursor()
    parser.run(conn, c)
    return [line for line in c.execute("SELECT * FROM grades")]
Example #13
def do_parse(self, arg):
    parser.run(arg)
Example #14
def do_cat(self, arg):
    path = "/sys/kernel/debug/usb/usbmon/" + arg
    # print(path)
    parser.run(path)
Example #15
import app
import parser

if __name__ == "__main__":
    app.run('guangzhou', '2018-05-03', '2018-05-04')
    parser.run()
Example #16
#!/usr/bin/env python3

from config import Config
from data_service import DataService

from parser import run

import logging
logging.basicConfig(level=logging.INFO, filename='parse.log')

if __name__ == '__main__':
    config = Config()
    data_service = DataService(
        config.get('db_connection_url'),
        config.get('db_name'),
    )
    run(config.get('start_url'), data_service)
Example #17
def run(self):
    now = datetime.datetime.now()
    dataset = parser.run(now.month, 12)
    self.create_course_table()
    self.insert_course(dataset)
    self.output_json_file(dataset)
Example #18
def test():
    temp_dir = tempfile.mkdtemp()

    link = base64.b64decode(
        "aHR0cDovL2NvZGVmb3JjZXMuY29tL2NvbnRlc3QvMTE4MS9wcm9ibGVtL0E=").decode(
            "utf-8")
    print("Test: " + link)
    parser.run(temp_dir, link)
    errors = 0
    file_name = "cf_sample_1.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("5 4 3".split(), f.read().split())
    file_name = "cf_sample_1.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("3 1".split(), f.read().split())
    file_name = "cf_sample_2.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("6 8 2".split(), f.read().split())
    file_name = "cf_sample_2.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("7 0".split(), f.read().split())

    link = base64.b64decode(
        "aHR0cDovL2NvZGVmb3JjZXMuY29tL2NvbnRlc3QvMS9wcm9ibGVtL0I=").decode(
            "utf-8")
    print("Test: " + link)
    parser.run(temp_dir, link)
    file_name = "cf_sample_1.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("2\nR23C55\nBC23".split(), f.read().split())
    file_name = "cf_sample_1.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("BC23\nR23C55".split(), f.read().split())

    link = base64.b64decode(
        "aHR0cDovL2NvZGVmb3JjZXMuY29tL2NvbnRlc3QvNzAwL3Byb2JsZW0vRA==").decode(
            "utf-8")
    print("Test: " + link)
    parser.run(temp_dir, link)
    file_name = "cf_sample_1.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq(
            "7\n1 2 1 3 1 2 1\n5\n1 7\n1 3\n3 5\n2 4\n4 4".split(),
            f.read().split())
    file_name = "cf_sample_1.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("10\n3\n3\n5\n0".split(), f.read().split())

    link = base64.b64decode(
        "aHR0cDovL2FjbXAucnUvaW5kZXguYXNwP21haW49dGFzayZpZF90YXNrPTc=").decode(
            "utf-8")
    print("Test: " + link)
    parser.run(temp_dir, link)
    file_name = "acmp_sample_1.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("5 7 3".split(), f.read().split())
    file_name = "acmp_sample_1.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("7".split(), f.read().split())
    file_name = "acmp_sample_2.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("987531 234 86364".split(), f.read().split())
    file_name = "acmp_sample_2.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("987531".split(), f.read().split())
    file_name = "acmp_sample_3.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("189285 283 4958439238923098349024".split(),
                            f.read().split())
    file_name = "acmp_sample_3.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("4958439238923098349024".split(), f.read().split())

    link = base64.b64decode(
        "aHR0cDovL2FjbXAucnUvaW5kZXguYXNwP21haW49dGFzayZpZF90YXNrPTQzMg=="
    ).decode("utf-8")
    print("Test: " + link)
    parser.run(temp_dir, link)
    file_name = "acmp_sample_1.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq(
            "5 10\n##......#.\n.#..#...#.\n.###....#.\n..##....#.\n........#.".
            split(),
            f.read().split())
    file_name = "acmp_sample_1.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("3".split(), f.read().split())
    file_name = "acmp_sample_2.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq(
            "5 10\n##..#####.\n.#.#.#....\n###..##.#.\n..##.....#\n.###.#####".
            split(),
            f.read().split())
    file_name = "acmp_sample_2.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("5".split(), f.read().split())

    link = base64.b64decode(
        "aHR0cDovL2FjbXAucnUvaW5kZXguYXNwP21haW49dGFzayZpZF90YXNrPTIzOQ=="
    ).decode("utf-8")
    print("Test: " + link)
    parser.run(temp_dir, link)
    file_name = "acmp_sample_1.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq(
            "4 3 3\n2 2 2\n2 0 0\n2 1 2\n2 2 2\n2 10 0 0\n1 5 1\n4 6 0 0 1".
            split(),
            f.read().split())
    file_name = "acmp_sample_1.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("15".split(), f.read().split())

    link = base64.b64decode(
        "aHR0cDovL2FjbS50aW11cy5ydS9wcm9ibGVtLmFzcHg/c3BhY2U9MSZudW09MTAwMQ=="
    ).decode("utf-8")
    print("Test: " + link)
    parser.run(temp_dir, link)
    file_name = "timus_sample_1.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq(
            " 1427  0   \n   876652098643267843 \n5276538\n  \n   ".split(),
            f.read().split())
    file_name = "timus_sample_1.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq(
            "2297.0716\n936297014.1164\n0.0000\n37.7757".split(),
            f.read().split())

    link = base64.b64decode(
        "aHR0cDovL2FjbS50aW11cy5ydS9wcm9ibGVtLmFzcHg/c3BhY2U9MSZudW09MTIzMg=="
    ).decode("utf-8")
    print("Test: " + link)
    parser.run(temp_dir, link)
    file_name = "timus_sample_1.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("11 5 2".split(), f.read().split())
    file_name = "timus_sample_1.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("3\n0 3 7\n3 3 3\n3 –1 0".split(),
                            f.read().split())

    link = base64.b64decode(
        "aHR0cDovL2FjbS50aW11cy5ydS9wcm9ibGVtLmFzcHg/c3BhY2U9MSZudW09MTQxMg=="
    ).decode("utf-8")
    print("Test: " + link)
    parser.run(temp_dir, link)
    file_name = "timus_sample_1.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("10 10\n2\n0.5 2\n2 10.1".split(),
                            f.read().split())
    file_name = "timus_sample_1.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("99.666".split(), f.read().split())

    link = base64.b64decode(
        "aHR0cHM6Ly9hdGNvZGVyLmpwL2NvbnRlc3RzL2FiYzE0MS90YXNrcy9hYmMxNDFfYQ=="
    ).decode("utf-8")
    print("Test: " + link)
    parser.run(temp_dir, link)
    file_name = "atcoder_sample_1.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("Sunny".split(), f.read().split())
    file_name = "atcoder_sample_1.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("Cloudy".split(), f.read().split())
    file_name = "atcoder_sample_2.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("Rainy".split(), f.read().split())
    file_name = "atcoder_sample_2.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("Sunny".split(), f.read().split())

    link = base64.b64decode(
        "aHR0cHM6Ly9hdGNvZGVyLmpwL2NvbnRlc3RzL2FiYzE0MS90YXNrcy9hYmMxNDFfYw=="
    ).decode("utf-8")
    print("Test: " + link)
    parser.run(temp_dir, link)
    file_name = "atcoder_sample_1.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("6 3 4\n3\n1\n3\n2".split(), f.read().split())
    file_name = "atcoder_sample_1.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("No\nNo\nYes\nNo\nNo\nNo".split(),
                            f.read().split())
    file_name = "atcoder_sample_2.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("6 5 4\n3\n1\n3\n2".split(), f.read().split())
    file_name = "atcoder_sample_2.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("Yes\nYes\nYes\nYes\nYes\nYes".split(),
                            f.read().split())
    file_name = "atcoder_sample_3.in"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq(
            "10 13 15\n3\n1\n4\n1\n5\n9\n2\n6\n5\n3\n5\n8\n9\n7\n9".split(),
            f.read().split())
    file_name = "atcoder_sample_3.out"
    with open(os.path.join(temp_dir, file_name), "r") as f:
        print("Checking " + file_name)
        errors += assert_eq("No\nNo\nNo\nNo\nYes\nNo\nNo\nNo\nYes\nNo".split(),
                            f.read().split())

    shutil.rmtree(temp_dir)
    exit(errors)
Example #19
File: main.py  Project: icyfork/pacapt-py
def main():
    args = parser.run()
    dispatch(args)
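Several snippets here (Examples #19 and #21) call a zero-argument parser.run() to obtain parsed command-line arguments. A minimal sketch of what such a function might look like on top of argparse; the option names are illustrative, not the actual pacapt-py interface:

import argparse

def run():
    # hypothetical stand-in for the project's parser module
    p = argparse.ArgumentParser(prog='pacapt')
    p.add_argument('command', help='operation to dispatch')
    p.add_argument('args', nargs='*', help='arguments for the operation')
    return p.parse_args()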
Example #21
import sys
sys.path.insert(0,'python-src')
sys.path.insert(0,'mic/')
import numpy as np
import pandas as pd
import time
import pymictools
import futil
import parser
import fs

if __name__ == "__main__":
    args = parser.run()
    if args:  # proceed only when arguments were parsed
        [micArgs, fsArgs, fullArgs] = args

        futil.createFolder("tmp")
        filename = futil.getFileName(fullArgs["filepath"])
        if fullArgs["dataset_orientation"] == 0:  # dataset has features in columns
            micInputPath = "tmp/" + filename
            mlInputPath = fullArgs["filepath"]
            futil.transposeDataset(fullArgs["filepath"], micInputPath)
            micArgs = micArgs.replace(fullArgs["filepath"], micInputPath)
        else:  # dataset has features in rows
            micInputPath = fullArgs["filepath"]
            mlInputPath = "tmp/" + filename
            futil.transposeDataset(fullArgs["filepath"], mlInputPath)

        # Execute feature ranking
        initTime = time.time()
        MICCommand = "mictools" + micArgs