def file_handler():
    if request.method == 'GET':
        pages = request.args.get('pages', default=10, type=int)
        parse.parser(pages)
        rooms = request.args.get('rooms', default=2, type=int)
        data_clean.clean_data(rooms)
        return readFileHandler('clean_data.csv', request)
    else:
        text = request.form['text']
        return writeFileHandler('clean_data.csv', text, request)

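# The view above assumes Flask's request object and a registered route,
# neither of which is shown. A minimal wiring sketch; the URL rule and
# app object are assumptions, not part of the original snippet:
from flask import Flask, request

app = Flask(__name__)
app.add_url_rule('/file', view_func=file_handler, methods=['GET', 'POST'])
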
def main(argv):
    if len(argv) < 3:
        return 1
    css = argv[1]
    html = argv[2]
    doc = ht.document_fromstring(open(html).read())
    css_text = open(css).read()
    rules = parser().parseString(css_text)
    tr = cs.HTMLTranslator()
    result_rules = []
    rejected_rules = []
    for r in rules:
        if check_rule(r, doc):
            result_rules.append(r)
            print(r.text(), end='')
        else:
            print('rejected:', r.text(exclude=False), file=sys.stderr)
            rejected_rules.append(r)
    print()
    print("rules before:\t", len(rules), file=sys.stderr)
    print("rules after:\t", len(result_rules), file=sys.stderr)
    #print("rejected rules:", file=sys.stderr)
    #for r in rejected_rules:
    #    print(r.text(exclude=False), file=sys.stderr, end='')
    sys.exit()

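# check_rule is not defined in this snippet. A minimal sketch of what it
# presumably does, assuming the custom rule objects expose their selectors
# as strings and reusing cssselect's HTMLTranslator (imported as cs) with
# lxml's xpath(); the attribute name `selectors` is an assumption:
def check_rule(rule, doc, translator=cs.HTMLTranslator()):
    # keep a rule if at least one of its selectors matches the document
    for selector in rule.selectors:
        try:
            if doc.xpath(translator.css_to_xpath(selector)):
                return True
        except Exception:
            continue  # skip selectors cssselect cannot translate
    return False
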
def __init__(self, args):
    Qt.QApplication.__init__(self, args)
    # languages
    self.iso = iso639
    self.languages = []
    for language in self.iso.data:
        if self.iso.to_iso639_1(language['name']):
            self.languages.append(language['name'])
    # init
    self.parser = parser()
    # ask for xliff
    # add layout
    self.widget = Qt.QWidget()
    self.gridLayout = Qt.QGridLayout()
    self.widget.setLayout(self.gridLayout)
    # init widgets
    self.table = translationTable()
    self.sourceLanguageBox = Qt.QComboBox()
    self.targetLanguageBox = Qt.QComboBox()
    self.toLabel = Qt.QLabel("to")
    self.translateText = TextEdit()
    self.enterButton = Qt.QPushButton("enter")
    self.exportToiOS = Qt.QPushButton("export to iOS")
    self.exportToAndroid = Qt.QPushButton("export to Android")
    # modify widgets
    self.sourceLanguageBox.addItems(self.languages)
    self.targetLanguageBox.addItems(self.languages)
    # place widgets
    self.gridLayout.addWidget(self.table, 1, 0, 5, 5)
    self.gridLayout.addWidget(self.sourceLanguageBox, 0, 0, 1, 2)
    self.gridLayout.addWidget(self.targetLanguageBox, 0, 3, 1, 2)
    self.gridLayout.addWidget(self.toLabel, 0, 2, 1, 1)
    self.gridLayout.addWidget(self.translateText, 6, 0, 4, 2)
    self.gridLayout.addWidget(self.enterButton, 6, 4, 1, 1)
    self.gridLayout.addWidget(self.exportToiOS, 7, 4, 1, 1)
    self.gridLayout.addWidget(self.exportToAndroid, 8, 4, 1, 1)
    # show widgets
    self.widget.show()
    self.loadStrings()
    self.setLanguageBoxes()
    # signals
    self.targetLanguageBox.currentIndexChanged.connect(self.table.wipeTranslations)
    self.translateText.enterPressed.connect(self.nextTranslation)
    self.exportToiOS.clicked.connect(self.createXLIFF)
    self.exportToAndroid.clicked.connect(self.createXML)
    self.targetLanguageBox.currentIndexChanged.connect(self.changeLanguage)
    self.table.keyPressed.connect(self.giveFocusToTextEdit)
    self.exec_()

def main():
    args = parser()
    if args is None:
        exit()
    if args.verbose:
        print('Arguments parsed....')

    # Model instance
    vgg = VGG(args.model_path, args.pool_type, args.lalpha)
    if args.verbose:
        print('Model created....')

    # Content and style images
    content_image = load_image(os.path.join(args.content_path, args.content_image),
                               max_size=args.max_size)
    style_images = [
        load_image(os.path.join(args.style_path, image),
                   shape=(content_image.shape[1], content_image.shape[0]))
        for image in args.style_images
    ]
    if args.verbose:
        print('Content and style images loaded....')

    if args.initial_type == 'content':
        init_gen_image = content_image
    elif args.initial_type == 'style':
        init_gen_image = style_images[0]
    elif args.initial_type == 'random':
        init_gen_image = get_content_image(content_image, args.noise_ratio, args.seed)
    if args.verbose:
        print('Generated image initialized....')

    # Stylize instance
    stylize = Stylize(vgg, content_image, style_images, init_gen_image, args)
    if args.verbose:
        print('Style-model created....')
        print('Generating image....')

    # Transfer style
    gen_image = stylize.transfer_style()
    if args.verbose:
        print('Image generated....')

    # Save the image to the destination path
    save_image(args.out_filepath, gen_image)
    if args.verbose:
        print('Generated image saved....')
        print('Completed!!!! :)')

def main(): os.system("clear") print "Removing all the previous known hosts " os.system("rm -f /root/.ssh/known_hosts") print " Scanning The Network " #scan.scan_now() print " Parsing the Data from the nodes " parse.parser() # time.sleep(5) l = raw_input("enter any key to continue ====> ") while True: os.system("clear") print(""" \t\t\t############################## \t\t\t# CONFIGURATION MODE # \t\t\t############################## \t\t\t1. Automatic Configuration \t\t\t2. Manual Configuration """) choice = raw_input("\tEnter Choice ") if int(choice) == 1: auto_config() break elif int(choice) == 2: man_config() break else: # noinspection PyUnusedLocal xx = raw_input("Enter a Valid Option! Press Any Key To Retry ") continue hdfs_config.hdfs_menu() mapred_config.mr_menu()
def __init__(self, nomUtilisateur, nomFichier):
    self.id = nomUtilisateur
    self.horloge = 0.0
    self.annuaire = annuaire.Annuaire()
    self.scene = parse.parser(nomFichier, self.annuaire)
    #print self.annuaire.chercher("guide")
    #x = input("suite")
    # self.agents = agents.Agents()
    self.camera = camera.Camera(scene=self.scene)
    look, angle, up = self.scene.getLookAt()
    self.camera.setLookAt(look, angle, up)
    # initialize the network connection
    self.connexionPrincipale = reseau.Connexion()
    self.connexionPrincipale.emettre("ID %s" % (self.id))

def setup():
    global textures
    global scene
    global laCamera
    glEnable(GL_DEPTH_TEST)
    glEnable(GL_TEXTURE_2D)
    glAlphaFunc(GL_GREATER, 0.4)
    glEnable(GL_ALPHA_TEST)
    laCamera = camera.Camera()
    scene = parse.parser('scene.xml')

def __init__(self, nomUtilisateur, nomFichier):
    self.id = nomUtilisateur
    self.horloge = 0.0
    self.annuaire = annuaire.Annuaire()
    self.scene = parse.parser(nomFichier, self.annuaire)
    self.camera = camera.Camera(scene=self.scene)
    look, angle, up = self.scene.getLookAt()
    self.camera.setLookAt(look, angle, up)
    self.avatars = avatar.Avatars()
    unAvatar = avatar.Avatar(url="../data/obj/penguin.obj")
    self.avatars.ajouter("pingouin", unAvatar)
    # initialize the network connection
    self.connexionPrincipale = reseau.Connexion(host='127.0.0.1', port=50000)
    self.connexionPrincipale.emettre("ID %s" % (self.id))

def prepare(chosen_file, set_length):
    dirname = os.getcwd()
    dirname += '/data/'
    data_parser = parser()
    files = []
    for file in os.listdir(dirname):
        if file.endswith(".DAT"):
            files.append(file)
    chosen_test_set = chosen_file
    file_data_train = []
    file_data_test = []
    for i in range(len(files)):
        data_parser.parse_data(dirname + files[i])
        temp = data_parser.get_all_com_fbk().values
        if i == chosen_test_set:
            file_data_test.append(temp)
        else:
            file_data_train.append(temp)
    train_sets = []
    test_sets = []
    for file in file_data_train:
        sets = list(chunk_list(file, set_length))
        for set in sets:
            temp = dataset(set)
            if temp.num_steps < set_length:
                temp.pad_size(set_length)
            if temp.init_mean[0] <= 15 and temp.init_mean[1] <= 65:
                train_sets.append(temp)
    random.shuffle(train_sets)
    for file in file_data_test:
        sets = list(chunk_list(file, set_length))
        for set in sets:
            temp = dataset(set)
            if temp.num_steps < set_length:
                temp.pad_size(set_length)
            if temp.init_mean[0] <= 15 and temp.init_mean[1] <= 65:
                test_sets.append(temp)
    return train_sets, test_sets

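# chunk_list is not shown in this snippet; a minimal sketch of the helper
# it presumably refers to (the name comes from the call above, the slicing
# behaviour is an assumption):
def chunk_list(data, size):
    """Yield consecutive slices of data, each at most size items long."""
    for i in range(0, len(data), size):
        yield data[i:i + size]
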
def crawl(self, base_url, paras, parser, writer=None):
    dl = self.downloader
    url = base_url % paras
    try:
        response = dl.session.get(url)
    except:
        print('cannot download url:[%s]' % url)
        print("sys err info:", sys.exc_info()[0])
        print("start to sleep for %s" % self.sleep_time)
        time.sleep(self.sleep_time)
        print("Woke up, crawl again...")
        return self.crawl(base_url, paras, parser, writer)
    else:
        res = parser(response)
        if 'reaction' in res and res['reaction'] == 'retry':
            print("Ajax json is broken, I will sleep for 1' and retry...")
            time.sleep(60)
            res = self.crawl(base_url, paras, parser, writer)
        if writer:
            writer(res, url)
        return res

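# A hypothetical call site for crawl(): base_url carries a printf-style
# placeholder filled in from paras, parser turns a response into a result
# dict, and writer persists it. Every name below is illustrative, not
# taken from the original code:
def json_parser(response):
    return response.json()

def line_writer(res, url):
    with open('results.txt', 'a') as f:
        f.write('%s\t%s\n' % (url, res))

# crawler.crawl('https://example.com/api?page=%d', 1, json_parser, line_writer)
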
def test_conflict_pair(self):
    mock_parser = parser()
    hc_classes = [111, 108, 359, 714, 1]
    pref_dict = {1: [111, 108, 359, 714], 2: [1]}
    correct_result = {
        111: {108: 1, 359: 1, 714: 1, 1: 0},
        108: {111: 1, 359: 1, 714: 1, 1: 0},
        359: {111: 1, 108: 1, 714: 1, 1: 0},
        714: {111: 1, 108: 1, 359: 1, 1: 0},
        1: {111: 0, 108: 0, 359: 0, 714: 0},
    }
    result = mock_parser.conflict_pair(hc_classes, pref_dict)
    maximum = 1
    self.assertEqual(result[0], correct_result)
    self.assertEqual(result[1], maximum)

def from_dict(cls, d: dict) -> 'CodePack':
    root = None
    stack = list()
    codes = dict()
    receive = dict()
    for i, line in enumerate(d['structure'].split('\n')):  # os.linesep
        split_idx = line.index('Code')
        hierarchy = len(line[1:split_idx - 1])
        code_str = line[split_idx:]
        p = parser(Code.blueprint(code_str))
        attr = p.parse(code_str).named
        if attr['id'] not in codes:
            codes[attr['id']] = Code(id=attr['id'],
                                     source=d['source'][attr['id']],
                                     env=attr.get('env', None),
                                     image=attr.get('image', None),
                                     owner=attr.get('owner', None))
        code = codes[attr['id']]
        receive[code.id] = literal_eval(attr['receive'])
        if i == 0:
            root = code
        while len(stack) and stack[-1][1] >= hierarchy:
            n, h = stack.pop(-1)
            if len(stack) > 0:
                stack[-1][0] >> n
        stack.append((code, hierarchy))
    while len(stack):
        n, h = stack.pop(-1)
        if len(stack) > 0:
            stack[-1][0] >> n
    for id, code in codes.items():
        for arg, sender in receive[id].items():
            code.receive(arg) << codes[sender]
    return cls(d['_id'], code=root, subscribe=d['subscribe'], owner=d.get('owner', None))

def test_sorted_conflict_pair(self):
    mock_parser = parser()

ir += " " + node.data return ir if __name__ == "__main__": with open("testfiles/testfile_1.txt", 'r') as test: code = test.read() scan = scanner(code) scan.lexical() tokens = scan.tokens for token in tokens: print(token) print() parsing = parser(tokens, "grammar2.txt") print("LL Grammar") for i in parsing.grammar: for j in parsing.grammar[i]: if j[0] == '': print(i, '->', "''") else: print(i, '->', ' '.join(j)) parsing.get_FIRST() parsing.get_FOLLOW() print("\nFIRST") for i in parsing.first: print(i, parsing.first[i])
fn = ''  #{{{
exp = ''
for i, arg in enumerate(sys.argv):
    if arg == '-f':
        fn = sys.argv[i + 1]
    elif arg == '-exp':
        exp = sys.argv[i + 1]
if fn == '':
    print 'Please specify a data file'
    sys.exit(1)
if exp == '':
    print "You're very dull"
    sys.exit(1)  #}}}

file_dict = parser(fn)
'''
Format:
{file_id: {'modification_time': 1231285728.0,
           'user_id': 0,
           'file_id': 5030,
           'block_size_in_bytes': 524288,
           'path_to_file': ' /0/1/2/3/4/5/6/7/8/9',
           'size_in_bytes': 1000,
           'group_id': 0,
           'creation_time': 1231285728.0,
           'permissions': '-rwxrwxr--'}, ...}
'''
if exp == 'unique-fid':
    get_unique_items(file_dict, 'file_id')
elif exp == 'unique-uid':
    get_unique_items(file_dict, 'user_id')
elif exp == 'unique-gid':

def get_files(path):
    file_dict = parser(path)
    return file_dict

# Number of requests for which we obtain the least MSE (only requests of
# lengths that are multiples of 2 are considered)
from parse import parser
import states
import chained
import numpy as np
from sklearn.metrics import mean_squared_error

order = 2
y_true = []
y_pred = []
mse = []
p = parser()
p.parse_file()  # file name can be changed in parse.py
text = p.write_bytes
text = states.write_bytes_states(text)
pred = chained.markov_chain(text[:2], order)
print pred
y_true.append(np.float(text[3]))
y_pred.append(np.float(pred[0]))
mse.append(mean_squared_error(y_true, y_pred))
print 'len' + str(len(text))
for j in xrange(4, len(text) - 8, 8):
    for i in xrange(j, (j * 2) + 1, 2):
        pred = chained.markov_chain(text[:i], order)
        y_true = list()
        y_pred = list()
        y_true.append([float(s) for s in (text[i + 1:(2 * i - 1)])])
        y_pred.append([float(s) for s in (pred[:i - 2])])

def inter_arrival():
    p = parser()
    p.parse_file()
    text = p.timestamp
    text = states.interarrival_states(text)
    chained.markov_chain(text, order)

###################################################
help(reticle.alignMark)
issubclass(reticle.alignMark, reticle.overhead)
eQuadMainL0 = reticle.ebeamQuad()
isinstance(eQuadMainL0, reticle.ebeamQuad)

###################################################
# UT-1 Parse the primary Leica portion of
#      a Photo Info file
###################################################
read = reader.reader()
read.setName("pa0756.photo")
#read.setName("pa0758.photo")
read.setPath("./testTargets/")
read.read()
leicaParse = parse.parser()
leicaParse.setLines(read.getLines())
leicaParse.setBegString("Leica Marks")
leicaParse.setEndString("Scan")
leicaParse.parse()
leicaParse.getbOccur()
write = writer.writer()
write.setName("leica.parse")
write.setPath("./")
write.setLines(leicaParse.getpLines())
write.write()

###############################
# Parse out primary lines
text = leicaParse.getpLines()
pLines = []

# Usage:
#   python cluster_and_count.py <LANL_snapshot_file> DBSCAN_epsilon_value DBSCAN_n_value
if len(sys.argv) == 1:
    path_to_snapshots = "/Users/ian/Desktop/datasets/lanl_fs/anon-all-fs/"
    snapshot_file_path = path_to_snapshots + "anon-lnfs-fs4.txt"
    eps = 10
    n = 100
else:
    snapshot_file_path = sys.argv[1]
    eps = float(sys.argv[2])  # command-line arguments arrive as strings
    n = int(sys.argv[3])

# parse the snapshot into a file dictionary
file_dict = parse.parser(snapshot_file_path)

# get overall counts
users, groups, files = get_counts(file_dict)
print "--Counts--"
print "Users:", users
print "Groups:", groups
print "Files:", files

# cluster:
# pull out the mtimes from the file_dict
# NOTE: WE ARE TRUNCATING AT 2000 files initially
keys = file_dict.iterkeys()

from __future__ import print_function
import sys, os
from pprint import pprint

sys.path.append(
    os.path.join(
        os.path.dirname(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
        'src'))

from parse import initialize_optimade_parser, parser

ls = initialize_optimade_parser(spaces_hack=True)
demo_string = 'filter=NOT a > b OR c = 100 AND f = "C2 H6"'
filter_ast = parser(ls, demo_string)
pprint(filter_ast)
assert (filter_ast == ('Filter',
                       ('Keyword', ('filter=', 'filter=')),
                       ('Expression',
                        ('Term',
                         ('Atom',
                          ('NOT', 'NOT'),
                          ('Comparison',
                           ('Value', ('Identifier', 'a')),
                           ('Operator', '>'),
                           ('Value', ('Identifier', 'b'))))),
                        ('OR', 'OR'),
                        ('Expression',
                         ('Term',
                          ('Atom',
                           ('Comparison',
                            ('Value', ('Identifier', 'c')),
                            ('Operator', '='),

import parse as ps
import codecs

p = ps.parser()
result = 'hostname,tld,port,path,queue,segment\n'
with codecs.open('../data/url-data.csv', encoding='utf-8') as f:
    data = f.read()
data_arr = data.split('\n')
del data_arr[0]
for line in data_arr:
    line_result = ''
    line_arr = line.split(',')
    if len(line_arr) > 1 and line_arr[1] == 'bad':
        _, hostname, tld, port, path, queue, segment = p.parse(line_arr[0])
        if not hostname:
            continue
        parts_arr = [hostname, tld, port, path, queue, segment]
        for part in parts_arr:
            if part:
                line_result += part + ','
            else:
                line_result += ','
        line_result = line_result[:-1] + '\n'
        result += line_result

with codecs.open('../data/url-data2.csv', encoding='utf-8') as f:
    data = f.read()
data_arr = data.split('\n')
del data_arr[0]

import lex
import parse
import evalu

# On each loop, the interpreter reads an input string from the user and
# prints the result. var_dict holds the user's variables.
print("LISP Interpreter")
var_dict = {}
while (1):
    buffer = input('>')
    while (1):
        stack = []
        for i in buffer:
            if (i == "("):
                stack.append(i)
            elif (i == ")"):
                stack.pop()
        if (len(stack) != 0):
            # if the parentheses are unbalanced, prompt for input again
            print("Not match pair of '()'")
            break
        else:
            try:
                # if the parentheses are balanced, process the code
                lex_result = lex.lexer(buffer)
                parse_result = parse.parser(var_dict, lex_result)
                evalu.sementic_analysis(var_dict, parse_result)
            except NotImplementedError:
                pass
            break

def should_normalize_whitespace(self):
    text = "Capacity limits of information processing\n in the brain"
    parsed = parse.parser(text)
    assert parsed.words() == "Capacity limits of information processing in the brain"

def casm_parse(tokens):
    ast = parser()(tokens, 0)
    return ast.value

def ost_chain():
    p = parser()
    p.parse_file()
    text = p.stripe_count
    text = states.ost_stripe_states(text)
    chained.markov_chain(text, order)

def func():
    """
    Input Parameters
    ------------------------------------------------------------
    """
    thresh_init = 5000000
    file_path = "anon-lnfs-fs4.txt"
    n_clusters_lo = 10
    n_clusters_hi = 11
    bin = 6
    exclude_uid = True
    """
    ------------------------------------------------------------
    """
    # parse contents
    snapshot_path = file_path
    file_dict = parse.parser(snapshot_path)
    n_samples = len(file_dict)

    # obtain different permutations for further observation
    input = ["file_id", "user_id", "group_id", "size_in_bytes",
             "creation_time", "modification_time", "block_size_in_bytes"]
    #input_list = itertools.permutations(input, 3)
    input_list = [["creation_time", "modification_time", "user_id"]]

    # plotting
    for single_input in input_list:
        x_axis, y_axis, z_data = single_input

        # label the units
        x_label = x_axis + ' (unit: s)' if 'time' in x_axis else x_axis
        y_label = y_axis + ' (unit: s)' if 'time' in y_axis else y_axis
        cbar_label = z_data + ' (unit: s)' if 'time' in z_data else z_data

        # binary search for an appropriate threshold point
        thresh = thresh_init
        thresh_lo = 0
        thresh_hi = 3 * thresh_init

        # initialize data lists
        x = []
        y = []
        z = []
        for file_id, file_data in file_dict.items():
            # add normalization
            x.append(file_data[x_axis])
            y.append(file_data[y_axis])
            z.append(file_data[z_data])
        data = numpy.concatenate(([x], [y]))
        data = data.T

        x_ex = []
        y_ex = []
        z_ex = []
        if exclude_uid:
            for index in xrange(len(z)):
                if z[index] != 0:
                    x_ex.append(x[index])
                    y_ex.append(y[index])
                    z_ex.append(z[index])
            x = x_ex
            y = y_ex
            z = z_ex
            data = numpy.concatenate(([x], [y]))
            data = data.T

        # clustering with expected clusters
        while True:
            clusters = hcluster.fclusterdata(data, thresh, criterion="distance")
            count_clusters = len(set(clusters))
            if thresh_lo >= thresh_hi:
                break
            if count_clusters > n_clusters_hi:
                thresh_lo = thresh
                thresh = (thresh + thresh_hi) / 2
                continue
            if count_clusters < n_clusters_lo:
                thresh_hi = thresh
                thresh = (thresh_lo + thresh) / 2
                continue
            break
        n_clusters = []
        for i in clusters:
            if i not in n_clusters:
                n_clusters.append(i)

        # uid
        aux = []
        for i in xrange(bin):
            aux.append(i)
        counter = collections.Counter(z)
        most_common = counter.most_common(bin)
        zz2 = []
        for i in xrange(len(z)):
            zz2.append(bin / 2)
        for i, (num, frequency) in enumerate(most_common):
            color = bin / 2
            if i % 2 == 0:
                color = aux.pop(0)
            else:
                color = aux.pop(len(aux) - 1)
            for j in xrange(len(z)):
                if num == z[j]:
                    zz2[j] = color

        shapes = ['o', 'h', 'D', 'v', '^', 's', '<', '*', '>', 'H', '.']
        cm = plt.cm.get_cmap('RdYlBu')
        legend_name_list = []
        for i in xrange(len(n_clusters)):
            points = []
            z2 = []
            for pos, j in enumerate(clusters):
                if j == n_clusters[i]:
                    points.append(data[pos])
                    z2.append(zz2[pos])
            legend_name_list.append(str(i))
            plt.scatter(*numpy.transpose(points), c=z2, s=100, alpha=1,
                        marker=shapes[i], linewidths=0, label=i, cmap=cm)

        # set labels
        plt.xlabel(x_label)
        plt.ylabel(y_label)

        # set title
        exp = 0
        thresh_decimal = float(thresh)
        while thresh_decimal > 2:
            thresh_decimal /= 10
            exp += 1
        thresh_standard_form = '%.2f * 10^%d' % (thresh_decimal, exp)
        title = "threshold: %s, number of clusters: %d" % (
            thresh_standard_form, len(set(clusters)))
        plot_title = plt.title(title)
        plot_title.set_position([.5, 1.05])

        # handle legends
        plt.legend(loc='lower right', markerscale=0.6, title='clusters')
        ax, _ = mpl.colorbar.make_axes(plt.gca(), shrink=1)
        #cbar = mpl.colorbar.ColorbarBase(ax, cmap=cm,
        #                                 norm=mpl.colors.Normalize(vmin=min(z), vmax=bin))
        bounds = numpy.linspace(0, bin, bin + 1)
        norm = mpl.colors.BoundaryNorm(bounds, cm.N)
        cbar = mpl.colorbar.ColorbarBase(ax, cmap=cm, norm=norm,
                                         spacing='proportional', ticks=bounds,
                                         boundaries=bounds, format='%1i')
        cbar.set_clim(0, bin)
        cbar.set_label(cbar_label)

        # save images
        file_name = x_axis + ' ' + y_axis + ' ' + z_data
        #plt.savefig(file_name)
        plt.show()
        plt.clf()

def wb_chain():
    p = parser()
    p.parse_file()  # file name can be changed in parse.py
    text = p.write_bytes
    text = states.write_bytes_states(text)
    chained.markov_chain(text, order)

def get_context_data(self, **kwargs):
    context = super(IndexView, self).get_context_data(**kwargs)
    context['result'] = parse.parser(settings.BASE_DIR + "/vasprun.xml")
    return context

def rb_chain():
    p = parser()
    p.parse_file()
    text = p.read_bytes
    text = states.write_bytes_states(text)
    chained.markov_chain(text, order)

        difference = max_steps - self.num_steps
        filler = np.zeros(difference)
        self.ycom = np.concatenate((self.ycom, filler))
        self.yfbk = np.concatenate((self.yfbk, filler))
        self.errors = np.concatenate((self.errors, filler))
        self.num_steps = len(self.ycom)


actions = [1, .9, .8, .7, .6, .5, .4, .3, .2, .1, 0,
           -.1, -.2, -.3, -.4, -.5, -.6, -.7, -.8, -.9, -1]
action_size = len(actions)

dirname = os.getcwd()
dirname += '/data/'
data_parser = parser()
files = []
for file in os.listdir(dirname):
    if file.endswith(".DAT"):
        files.append(file)

file_data = []
for file in files:
    data_parser.parse_data(dirname + file)
    temp = data_parser.get_y().values
    file_data.append(temp)

set_length = 500
datasets = []
for file in file_data:
    sets = list(chunk_list(file, set_length))

not_nones = [
    arg for arg in ['verify', 'output']
    if getattr(args, arg) not in {None, False}
]
if len(not_nones) != 1:
    raise ValueError(
        'One and only one argument in (v/verify, o/output) must be specified')
mode = not_nones[0]
logging.info(f'Selected mode is "{mode}"')

with open(args.config, 'r') as file:
    config = json.load(file)
logger.info(f'Configuration is {config}')

for file_path in files:
    with open(file_path, 'r') as file:
        contents = file.read()
    parsed = parser(contents, file_path)
    formatted = formatter(*parsed, file_path, config)
    if mode == 'output':
        output_file_path = file_path if args.output_prefix is None else os.path.join(
            args.output_prefix, file_path)
        output_file_folder = os.sep.join(output_file_path.split(os.sep)[:-1])
        os.makedirs(output_file_folder, exist_ok=True)
        with open(output_file_path, 'w') as file:
            file.write(formatted)

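# The argument parsing sits outside this fragment; a sketch of the flags
# the code above appears to expect, inferred from the attribute accesses
# (every flag name here is an assumption, not the project's actual CLI):
import argparse

ap = argparse.ArgumentParser()
ap.add_argument('-v', '--verify', action='store_true')
ap.add_argument('-o', '--output', action='store_true')
ap.add_argument('--output-prefix', default=None)
ap.add_argument('--config', required=True)
ap.add_argument('files', nargs='+')
args = ap.parse_args()
files = args.files
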
def __init__(self, res):
    self.__parser = parser(res)
    self.__good = False

import knowledge, parse, commands

print "Beginning processing..."
know = knowledge.knowledge()
pars = parse.parser(know)
comm = commands.commands(know, pars)
while True:
    data = raw_input(">>>")
    res = comm(data)
    if res:
        if res < 0:
            break
        continue
    # else process as usual
    pars(data)