def upload():
    file = request.files['inputFile']
    addForm = additionalInfoForm(request.form)
    p = Parser()
    item = p.parseFile(file.read(), file)
    return render_template('uploadForm.html',
                           form=addForm,
                           fileName=file,
                           smilesStr=item[0],
                           massStr=item[1],
                           filename=getattr(file, 'filename', None))
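# Hedged wiring sketch: `upload` above reads Flask's request proxy, so it is
# presumably registered as a Flask view. The app object, URL rule, and
# POST-only method list below are illustrative assumptions, not taken from
# the original source.
# from flask import Flask
# app = Flask(__name__)
# app.add_url_rule('/upload', view_func=upload, methods=['POST'])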
def translate(source_file, template_file):
    """Translate source_file (a YAML resume) to a LaTeX resume following template_file."""
    parser = Parser()
    data = parser.read(source_file)
    contact = Contact(data.get('contact'))
    sections = [Section(section, data.get(section)) for section in data.get('order')]
    formatter = LaTeXFormatter()
    return formatter.format(template_file, contact, sections)
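# Illustrative input only: translate() reads a YAML resume whose top level
# appears to need a 'contact' mapping plus an 'order' list naming the section
# keys; the concrete field names below are assumptions for this sketch.
example_resume = """\
contact:
  name: Ada Lovelace
  email: ada@example.com
order:
  - education
  - experience
education:
  - school: University of London
experience:
  - title: Analyst
"""
# with open('resume.yaml', 'w') as f:
#     f.write(example_resume)
# print(translate('resume.yaml', 'template.tex'))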
def process_message(self, connector, host, secret_key, resource, parameters):
    logger = logging.getLogger(__name__)
    logger.debug('resource {}'.format(resource))
    logger.debug('parameters {}'.format(parameters))

    inputfile = resource["local_paths"][0]
    file_id = resource['id']
    datasetid = parameters['datasetId']

    extracted = False
    with Parser() as p:
        for fp in p.items(inputfile):
            # add yaml file
            fid = files.upload_to_dataset(connector, host, secret_key,
                                          datasetid, fp, check_duplicate=True)
            tags = {'tags': ['YNeeded']}
            files.upload_tags(connector, host, secret_key, fid, tags)
            os.remove(fp)
            extracted = True

    if extracted:
        # set tags
        tags = {'tags': ['CSVExtracted']}
        files.upload_tags(connector, host, secret_key, file_id, tags)
def __init__(self, doc):
    """
    Perform syntactic simplification rules for Galician (this code is based
    on the one available at simpatico_ss/simplify.py for English).
    TODO: Relative and conjoint clauses are not supported by the Galician
    parser. Functions for these cases were left here as examples for future
    implementations.
    @param doc: document to be simplified.
    """
    self.sentences = open(doc, "r").read().strip().split("\n")

    ## markers are separated by their most used sense
    self.time = ['when', 'after', 'since', 'before', 'once']
    self.concession = ['although', 'though', 'but', 'however', 'whereas']
    self.justify = ['because', 'so', 'while']
    self.condition = ['if']
    self.condition2 = ['or']
    self.addition = ['and']

    ## list of all markers for analysis purposes
    self.cc = (self.time + self.concession + self.justify +
               self.condition + self.addition + self.condition2)

    ## list of relative pronouns
    self.relpron = ['whom', 'whose', 'which', 'who']

    ## initiates the parser server
    self.parser = Parser()

    ## Generation class instance
    self.generation = Generation(self.time, self.concession, self.justify,
                                 self.condition, self.condition2,
                                 self.addition, self.cc, self.relpron)
def main():
    """Runs the CLI."""
    try:
        topics_path = os.environ["TOPICS_PATH"]
    except KeyError:
        print("E: TOPICS_PATH environment variable not set")
        sys.exit(2)

    topics = TopicList(topics_path)
    args = Parser(topics, __doc__, __version__)
    status = args.cmd(topics=args.enabled, name=args.name,
                      dir=args.dir, args=args.raw).run()
    sys.exit(status)
def process_file(f):
    # sentinel marking "currently inside a <p> element"; the original snippet
    # referenced an undefined name P, so it is defined locally here
    P = object()

    class Handler(object):
        def __init__(self):
            self.status = None

        def start_element(self, name, attrs):
            if name in ('p', 'P'):
                self.status = P

        def end_element(self, name):
            if name in ('p', 'P'):
                self.status = None

        def char_data(self, data):
            #pprint(data)
            if self.status == P:
                if good(data):
                    sentences.append(''.join(' ' if x == '\n' else x for x in data) + '\n')

    sentences = []
    parser = Parser(Handler())
    parser.ParseFile(f)
    return sentences
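# Self-contained version of the same handler pattern using the standard
# library's expat bindings directly (the Parser/ParseFile wrapper above is not
# shown in the source, but its interface mirrors xml.parsers.expat).
import xml.parsers.expat

def extract_paragraphs(f):
    """Collect character data that appears inside <p> elements of f (binary file)."""
    sentences = []
    in_p = [False]  # mutable cell so the nested handlers can flip it

    def start_element(name, attrs):
        if name.lower() == 'p':
            in_p[0] = True

    def end_element(name):
        if name.lower() == 'p':
            in_p[0] = False

    def char_data(data):
        if in_p[0] and data.strip():
            sentences.append(data.replace('\n', ' ') + '\n')

    p = xml.parsers.expat.ParserCreate()
    p.StartElementHandler = start_element
    p.EndElementHandler = end_element
    p.CharacterDataHandler = char_data
    p.ParseFile(f)  # f must be opened in binary mode
    return sentences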
def __init__(self, filename, alg):
    """
    Build up city coordinates.
    :param filename: tsp file name
    """
    self.filename = filename
    self.city_list = []
    self.alg = alg
    __raw_data = Parser.parse(filename)
    # self.city_list holds tuples of <x_coordinate, y_coordinate, city_index>
    for item in __raw_data['map']:
        self.city_list.append((item[1], item[2], item[0]))
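# Tiny self-contained demonstration of the reordering above, with a stubbed
# parse result; the {'map': [(city_index, x, y), ...]} shape is inferred from
# the loop, not documented in the source.
raw_data = {'map': [(1, 0.0, 0.0), (2, 3.0, 4.0)]}
city_list = [(x, y, idx) for (idx, x, y) in raw_data['map']]
print(city_list)  # [(0.0, 0.0, 1), (3.0, 4.0, 2)]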
def __init__(self, *initial_data, **kwargs):
    self.is_source = False
    for dictionary in initial_data:
        for key in dictionary:
            setattr(self, key, dictionary[key])
    for key in kwargs:
        setattr(self, key, kwargs[key])
    if not hasattr(self, 'position'):
        self.position = [1, 1]
    self.position = ps._interprete_well(self.position)
    if hasattr(self, 'plate'):
        self.plate.replaceWell(self)
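# Self-contained illustration of the dict-plus-kwargs attribute merge used
# above; the surrounding Well class and ps._interprete_well are
# project-specific and not reproduced here.
class Record(object):
    def __init__(self, *initial_data, **kwargs):
        # positional dicts first, then keyword overrides
        for dictionary in initial_data:
            for key in dictionary:
                setattr(self, key, dictionary[key])
        for key in kwargs:
            setattr(self, key, kwargs[key])

r = Record({'position': [2, 3]}, label='A1')
print(r.position, r.label)  # [2, 3] A1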
def search_results(search):
    print("hit search")
    search_string = search.data['search']
    p = Parser()

    if search_string == '':
        # empty query: fall back to returning everything
        results = p.returnAllRes()
        print(results)
        return redirect('/')

    # the original checked `if not results` before fetching anything, so the
    # "No results found!" branch always fired; fetch first, then test
    results = p.returnAllRes()
    if not results:
        print("not found")
        flash('No results found!')
        return redirect('/')

    # display results
    print(results)
    return redirect('/')
def return_parser():
    """
    Defines and returns argparse ArgumentParser object.
    :return: ArgumentParser
    """
    parser = Parser("Simple token based rnn for language modeling from glove vectors.")
    parser.add_argument('-inputdir', type=str, default='input_data/numpy/')
    parser.add_argument('-learnrate', type=float, default=0.00272473811408,
                        help='Step size for gradient descent.')
    parser.add_argument("-lm_layers", nargs='+', type=int, default=[128],
                        help="A list of hidden layer sizes.")
    parser.add_argument('-mb', type=int, default=16,
                        help='The mini batch size for stochastic gradient descent.')
    parser.add_argument('-debug', action='store_true',
                        help='Use this flag to print feed dictionary contents and dimensions.')
    parser.add_argument('-maxbadcount', type=int, default=10,
                        help='Threshold for early stopping.')
    parser.add_argument('-random_seed', type=int, default=5,
                        help='Random seed for reproducible experiments.')
    parser.add_argument('-verbose', type=int, default=1,
                        help='Whether to print loss during training.')
    parser.add_argument('-decay', action='store_true',
                        help='Whether to use learnrate decay.')
    parser.add_argument('-decay_rate', type=float,
                        help='Rate to decay step size.')
    parser.add_argument('-decay_steps', type=int,
                        help='How many steps to perform learnrate decay.')
    parser.add_argument('-random', action='store_true',
                        help='Whether to initialize embedding vectors to random values.')
    parser.add_argument('-fixed', action='store_true')
    parser.add_argument('-epochs', type=float, default=3,
                        help='Maximum epochs to train on. Need not be in whole epochs.')
    parser.add_argument('-outfile', type=str, default='test_make_lstm_classifier.txt')
    parser.add_argument('-l2', type=float, default=0.0)
    parser.add_argument('-partition', type=str, default='both',
                        help='Can be "both", "desc", or "title"')
    parser.add_argument('-modelsave', type=str, default='saved_model/',
                        help='Directory to save trained model in.')
    # 16 0.00272473811408 128 0 0 0.245176650126 0.94024 0.905526 0.983861 0.943069
    return parser
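# Equivalent construction with the standard library directly, to show what the
# Parser(...) call above presumably does (assumption: Parser wraps
# argparse.ArgumentParser, taking the description as its first argument).
import argparse

demo = argparse.ArgumentParser(
    description="Simple token based rnn for language modeling from glove vectors.")
demo.add_argument('-mb', type=int, default=16)
demo.add_argument('-epochs', type=float, default=3)
args = demo.parse_args(['-mb', '32', '-epochs', '2.5'])
print(args.mb, args.epochs)  # 32 2.5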
def __init__(self, input_path, output_path):
    self.parser = Parser()
    self.output = []
    self.input_path = input_path
    self.output_path = output_path
class Simplify():
    def __init__(self, doc):
        """
        Perform syntactic simplification rules.
        @param doc: document to be simplified.
        """
        self.sentences = open(doc, "r").read().strip().split("\n")

        ## markers are separated by their most used sense
        self.time = ['when', 'after', 'since', 'before', 'once']
        self.concession = ['although', 'though', 'but', 'however', 'whereas']
        self.justify = ['because', 'so', 'while']
        self.condition = ['if']
        self.condition2 = ['or']
        self.addition = ['and']

        ## list of all markers for analysis purposes
        self.cc = (self.time + self.concession + self.justify +
                   self.condition + self.addition + self.condition2)

        ## list of relative pronouns
        self.relpron = ['whom', 'whose', 'which', 'who']

        ## initiates the parser server
        self.parser = Parser()

        ## Generation class instance
        self.generation = Generation(self.time, self.concession, self.justify,
                                     self.condition, self.condition2,
                                     self.addition, self.cc, self.relpron)

    def transformation(self, sent, ant, justify=False):
        """
        Transformation step in the simplification process. This is a recursive
        method that receives the following parameters:
        @param sent: sentence to be simplified.
        @param ant: previous sentence. If sent = ant, then no simplification should be performed.
        @param justify: controls cases where sentence order is inverted and should invert the entire recursion.
        @return: the simplified sentences.
        """

        def remove_all(aux, item):
            """
            Remove all incidences of a node in a graph (needed for graphs with cycles).
            @param aux: auxiliary of parse structure
            @param item: node to be removed
            """
            for a in aux.keys():
                for d in aux[a].keys():
                    if item in aux[a][d]:
                        aux[a][d].remove(item)

        def recover_punct(final, s):
            """
            Recover the punctuation of the sentence (needed because the
            dependency parser does not keep punctuation).
            @param final: the final dictionary with the words in order
            @param s: the tokenised sentence with the punctuation marks
            @return: the final dictionary with the punctuation marks
            """
            char_list = "``\'\'"
            ant = 0
            for k in sorted(final.keys()):
                if int(k) - ant == 2:
                    if s[k - 2] in string.punctuation + char_list:
                        final[k - 1] = s[k - 2]
                    elif s[k - 2] == "-RRB-":
                        final[k - 1] = ")"
                    elif s[k - 2] == "-LRB-":
                        final[k - 1] = "("
                if int(k) - ant == 3:
                    if (s[k - 2] in string.punctuation + char_list and
                            s[k - 3] in string.punctuation + char_list):
                        final[k - 1] = s[k - 2]
                        final[k - 2] = s[k - 3]
                ant = k
            return final

        def build(root, dep, aux, words, final, yes_root=True, previous=None):
            """
            Creates a dictionary with the words of a simplified clause,
            following the sentence order. This is a recursive method that
            navigates through the dependency tree.
            @param root: the root node in the dependency tree
            @param dep: the dependencies of the root node
            @param aux: the auxiliary parser output
            @param words: auxiliary parsed words
            @param final: dictionary with the positions and words
            @param yes_root: flag to define whether or not the root node should be included
            @param previous: list of nodes visited
            """
            ## controls recursion
            if previous is None:
                previous = []
            if root in previous:
                return
            previous.append(root)

            ## for cases where the rule does not include the root node
            if yes_root:
                final[root] = words[root - 1][0]
                previous.append(root)
            for k in dep.keys():
                for i in dep[k]:
                    if i in aux.keys():
                        deps = aux[i]
                        ## needed for breaking loops -- solved by the recursion condition
                        #for d in deps.keys():
                        #    if i in deps[d]:
                        #        deps[d].remove(i)
                        build(i, deps, aux, words, final, previous=previous)
                    final[i] = words[i - 1][0]

        def conjoint_clauses(aux, words, root, deps_root, ant, _type, rel):
            """
            Simplify conjoint clauses.
            @param aux: auxiliary parser output
            @param words: auxiliary words and POS tags structure
            @param root: root node in the dependency tree
            @param deps_root: dependencies of the root node
            @param ant: previous sentence (for recursion purposes)
            @param _type: list of markers found in the sentence that can indicate conjoint clauses
            @param rel: parser relation between the main and the dependent clause (can be 'advcl' or 'conj')
            @return: a flag that indicates whether or not the sentence was simplified
                     and the resulting sentence (if flag = False, ant is returned)
            """
            ## split the clauses
            others = deps_root[rel]
            pos = 0
            s1 = s2 = ""
            v_tense = ""
            for o in others:
                flag = True
                if o not in aux:
                    flag = False
                    continue
                deps_other = aux[o]

                ## check the marker position ('when' is advmod, while others are mark)
                if 'advcl' in rel:
                    if 'mark' in deps_other.keys():
                        mark = deps_other['mark'][0]
                        mark_name = words[mark - 1][0].lower()
                    elif 'advmod' in deps_other.keys():
                        mark = deps_other['advmod'][0]
                        mark_name = words[mark - 1][0].lower()
                    else:
                        flag = False  # needed for broken cases
                        continue
                else:
                    if 'cc' in deps_root.keys() and 'conj' in rel:
                        conj = deps_root[rel][0]
                        ## needed for broken cases like "Care and support you won't have to pay towards"
                        if ('VB' in words[conj - 1][1]['PartOfSpeech'] and
                                'VB' in words[root - 1][1]['PartOfSpeech']):
                            mark = deps_root['cc'][0]
                            mark_name = words[mark - 1][0].lower()
                        else:
                            flag = False
                            continue
                    else:
                        flag = False
                        continue

                ## hack for simpatico use cases
                if (mark_name == "and" and words[mark - 2][0].lower() == "care"
                        and words[mark][0].lower() == "support"):
                    flag = False
                    continue

                ## dealing with cases without subject
                if 'nsubj' not in deps_other and 'nsubj' in deps_root:
                    deps_other['nsubj'] = deps_root['nsubj']
                elif 'nsubj' not in deps_other and 'nsubjpass' in deps_root:
                    deps_other['nsubj'] = deps_root['nsubjpass']
                elif 'nsubj' not in deps_other and 'nsubj' not in deps_root:
                    flag = False
                    continue

                ## check if the marker is in the list of selected markers
                if mark_name in _type:
                    ## check if verbs have objects
                    tag_list = ('advcl', 'xcomp', 'acomp', 'amod', 'appos', 'cc',
                                'ccomp', 'dep', 'dobj', 'iobj', 'nwe', 'pcomp',
                                'pobj', 'prepc', 'rcmod', 'ucomp', 'nmod',
                                'auxpass', 'advmod', 'prep')
                    #if not any([t in tag_list for t in deps_root.keys()]):
                    #    flag = False
                    #    continue
                    #elif not any([t in tag_list for t in deps_other.keys()]):
                    #    flag = False
                    #    continue
                    #if (len(deps_root) < 2 or len(deps_other) < 2):
                    #    return False, ant

                    ## delete marker and relation from the graph
                    if 'advcl' in rel:
                        if 'mark' in deps_other.keys():
                            del deps_other['mark'][0]
                        elif 'advmod' in deps_other.keys():
                            del deps_other['advmod'][0]
                    else:
                        del deps_root['cc'][0]
                    #del deps_root[rel][pos]
                    #pos += 1
                    deps_root[rel].remove(o)

                    ## for cases with time markers -- This + modal + happen
                    modal = None
                    if 'aux' in deps_root and mark_name in self.time:
                        modal_pos = deps_root['aux'][0]
                        modal = words[modal_pos - 1][0]

                    ## for cases either..or with the modal verb attached to the main clause
                    if 'aux' in deps_root and mark_name in self.condition2:
                        deps_other['aux'] = deps_root['aux']

                    ## build the sentence again
                    final_root = {}
                    build(root, deps_root, aux, words, final_root)
                    final_deps = {}
                    build(o, deps_other, aux, words, final_deps)

                    ## TODO: remove this part from here --> move to another module: self.generation
                    root_tag = words[root - 1][1]['PartOfSpeech']
                    justify = True
                    #if ((root > o) and (mark_name in self.time and mark > 1)) or (mark_name == 'because' and mark > 1):
                    if (root > o) or (mark_name == 'because' and mark > 1):
                        if mark_name in self.time and mark == 1:
                            sentence1, sentence2 = self.generation.print_sentence(
                                final_root, final_deps, root_tag, mark_name, mark, modal)
                        else:
                            sentence1, sentence2 = self.generation.print_sentence(
                                final_deps, final_root, root_tag, mark_name, mark, modal)
                    else:
                        sentence1, sentence2 = self.generation.print_sentence(
                            final_root, final_deps, root_tag, mark_name, mark, modal)
                    s1 = self.transformation(sentence1, ant, justify)
                    s2 = self.transformation(sentence2, ant)
                    flag = True
                else:
                    flag = False
                    continue

            if flag:
                return flag, s1 + " " + s2
            else:
                return flag, ant

        def relative_clauses(aux, words, root, deps_root, ant, rel):
            """
            Simplify relative clauses.
            @param aux: auxiliary parser output
            @param words: auxiliary words and POS tags structure
            @param root: root node in the dependency tree
            @param deps_root: dependencies of the root node
            @param ant: previous sentence (for recursion purposes)
            @param rel: parser relation between the main and the dependent clause (can be 'nsubj' or 'dobj')
            @return: a flag that indicates whether or not the sentence was simplified
                     and the resulting sentence (if flag = False, ant is returned)
            """
            subj = deps_root[rel][0]
            if subj not in aux.keys():
                return False, ant
            deps_subj = aux[subj]
            if 'acl:relcl' in deps_subj.keys() or 'rcmod' in deps_subj.keys():
                if 'acl:relcl' in deps_subj.keys():
                    relc = deps_subj['acl:relcl'][0]
                    type_rc = 'acl:relcl'
                else:
                    relc = deps_subj['rcmod'][0]
                    type_rc = 'rcmod'
                deps_relc = aux[relc]
                if 'nsubj' in deps_relc.keys():
                    subj_rel = 'nsubj'
                elif 'nsubjpass' in deps_relc.keys():
                    subj_rel = 'nsubjpass'
                if 'ref' in deps_subj:
                    to_remove = deps_subj['ref'][0]
                    mark = words[deps_subj['ref'][0] - 1][0].lower()
                else:
                    to_remove = deps_relc[subj_rel][0]
                    mark = words[deps_relc[subj_rel][0] - 1][0].lower()
                if mark in self.relpron:
                    deps_relc[subj_rel][0] = subj
                    remove_all(aux, to_remove)
                elif 'dobj' in deps_relc:
                    ## needed for cases where the subject of the relative clause is the object
                    obj = deps_relc['dobj'][0]
                    if 'poss' in aux[obj]:
                        mod = aux[obj]['poss'][0]
                        aux_words = list(words[mod - 1])
                        aux_words[0] = words[subj - 1][0] + '\'s'
                        words[mod - 1] = tuple(aux_words)
                        aux[mod] = aux[subj]
                    else:
                        return False, ant
                else:
                    return False, ant

                ## for broken cases - "There are some situations where it is particularly
                ## important that you get financial information and advice that is
                ## independent of us."
                del aux[subj][type_rc]
                if 'punct' in deps_subj.keys():
                    del aux[subj]['punct']
                final_root = {}
                build(root, deps_root, aux, words, final_root)
                final_relc = {}
                build(relc, deps_relc, aux, words, final_relc)
                if justify:
                    sentence2, sentence1 = self.generation.print_sentence(final_root, final_relc)
                else:
                    sentence1, sentence2 = self.generation.print_sentence(final_root, final_relc)
                s1 = self.transformation(sentence1, ant, justify)
                s2 = self.transformation(sentence2, ant)
                return True, s1 + " " + s2
            else:
                return False, ant

        def appositive_phrases(aux, words, root, deps_root, ant):
            """
            Simplify appositive phrases.
            @param aux: auxiliary parser output
            @param words: auxiliary words and POS tags structure
            @param root: root node in the dependency tree
            @param deps_root: dependencies of the root node
            @param ant: previous sentence (for recursion purposes)
            @return: a flag that indicates whether or not the sentence was simplified
                     and the resulting sentence (if flag = False, ant is returned)
            """
            ## the apposition needs to have a subject -- the same subject of the main sentence
            if 'nsubj' in deps_root.keys():
                subj = deps_root['nsubj'][0]
                subj_word = words[subj - 1][0]
                if subj not in aux:
                    return False, ant
                deps_subj = aux[subj]
                v_tense = words[root - 1][1]['PartOfSpeech']
                n_num = words[subj - 1][1]['PartOfSpeech']
                if 'amod' in deps_subj:
                    ## bug -- this generates several mistakes...
                    mod = deps_subj['amod'][0]
                    if mod in aux:
                        deps_mod = aux[mod]
                    else:
                        deps_mod = {}
                    del aux[subj]['amod']
                    deps_subj = aux[subj]
                    ## treat simple cases such as 'general rule'
                    #if 'JJ' in words[mod-1][1]['PartOfSpeech'] and len(deps_mod.keys()) == 0:
                    if 'JJ' in words[mod - 1][1]['PartOfSpeech'] and 'punct' not in deps_subj:
                        return False, ant
                elif 'appos' in deps_subj:
                    mod = deps_subj['appos'][0]
                    if mod in aux:
                        deps_mod = aux[mod]
                    else:
                        deps_mod = {}
                    del aux[subj]['appos']
                    deps_subj = aux[subj]
                else:
                    return False, ant
                if 'punct' in deps_subj.keys():
                    del deps_subj['punct']
                final_root = {}
                build(root, deps_root, aux, words, final_root)
                final_appos = {}
                build(mod, deps_mod, aux, words, final_appos)
                final_subj = {}
                build(subj, deps_subj, aux, words, final_subj)
                if len(final_appos.keys()) < 2:
                    return False, ant
                sentence1, sentence2 = self.generation.print_sentence_appos(
                    final_root, final_appos, final_subj, v_tense, n_num, subj_word)
                s1 = self.transformation(sentence1, ant)
                s2 = self.transformation(sentence2, ant)
                return True, s1 + " " + s2
            else:
                return False, ant

        def passive_voice(aux, words, root, deps_root, ant):
            """
            Simplify sentence from passive to active voice.
            @param aux: auxiliary parser output
            @param words: auxiliary words and POS tags structure
            @param root: root node in the dependency tree
            @param deps_root: dependencies of the root node
            @param ant: previous sentence (for recursion purposes)
            @return: a flag that indicates whether or not the sentence was simplified
                     and the resulting sentence (if flag = False, ant is returned)
            """
            if 'auxpass' in deps_root.keys():
                if 'nmod:agent' in deps_root.keys():
                    if 'nsubjpass' not in deps_root:
                        return False, ant
                    subj = deps_root['nsubjpass'][0]
                    if subj in aux:
                        deps_subj = aux[subj]
                    else:
                        deps_subj = {}
                    aux_tense = words[deps_root['auxpass'][0] - 1][1]['PartOfSpeech']
                    v_aux = None
                    if aux_tense == 'VB' and 'aux' in deps_root.keys():
                        aux_tense = words[deps_root['aux'][0] - 1][1]['PartOfSpeech']
                        v_aux = words[deps_root['aux'][0] - 1][0]
                        del deps_root['aux']
                    elif aux_tense == 'VBG' and 'aux' in deps_root.keys():
                        #aux_tense = aux.get_by_address(deps_root[u'aux'][0])[u'tag']
                        v_aux = words[deps_root['aux'][0] - 1][0]
                        del deps_root['aux']
                    elif aux_tense == 'VBN' and 'aux' in deps_root.keys():
                        #v_aux = words[deps_root['aux'][0]-1][1]['PartOfSpeech']
                        v_aux = words[deps_root['aux'][0] - 1][0]
                        if v_aux.lower() in ("has", "have"):
                            v_aux = words[deps_root['aux'][0] - 1][0]
                        else:
                            aux_tense = 'MD'
                        del deps_root['aux']
                    del deps_root['auxpass']
                    del deps_root['nsubjpass']
                    if len(deps_root['nmod:agent']) > 1:
                        mod = deps_root['nmod:agent'][1]
                        mod2 = deps_root['nmod:agent'][0]
                        deps_mod = aux[mod]
                        deps_mod2 = aux[mod2]
                        if 'case' in deps_mod:
                            if words[deps_mod['case'][0] - 1][0].lower() != 'by':
                                return False, ant
                            del deps_mod['case']
                            del deps_root['nmod:agent']
                            subj_tag = words[mod - 1][1]['PartOfSpeech']
                            subj_word = words[mod - 1][0]
                            final_subj = {}
                            build(mod, deps_mod, aux, words, final_subj)
                            final_obj = {}
                            build(subj, deps_subj, aux, words, final_obj)
                            final_mod2 = {}
                            build(mod2, deps_mod2, aux, words, final_mod2)
                            final_root = {}
                            build(root, deps_root, aux, words, final_root, False)
                            sentence1 = self.generation.print_sentence_voice(
                                final_subj, final_obj, words[root - 1][0], v_aux,
                                aux_tense, subj_tag, subj_word, final_mod2, final_root)
                            s1 = self.transformation(sentence1, ant)
                            return True, s1
                        elif 'case' in deps_mod2:
                            if words[deps_mod2['case'][0] - 1][0].lower() != 'by':
                                return False, ant
                            del deps_mod2['case']
                            del deps_root['nmod:agent']
                            subj_tag = words[mod2 - 1][1]['PartOfSpeech']
                            subj_word = words[mod2 - 1][0]
                            final_subj = {}
                            build(mod2, deps_mod2, aux, words, final_subj)
                            final_obj = {}
                            build(subj, deps_subj, aux, words, final_obj)
                            final_mod2 = {}
                            build(mod, deps_mod, aux, words, final_mod2)
                            final_root = {}
                            build(root, deps_root, aux, words, final_root, False)
                            sentence1 = self.generation.print_sentence_voice(
                                final_subj, final_obj, words[root - 1][0], v_aux,
                                aux_tense, subj_tag, subj_word, final_mod2, final_root)
                            s1 = self.transformation(sentence1, ant)
                            return True, s1
                        else:
                            return False, ant
                    else:
                        mod = deps_root['nmod:agent'][0]
                        deps_mod = aux[mod]
                        if 'case' in deps_mod:
                            if words[deps_mod['case'][0] - 1][0].lower() != 'by':
                                return False, ant
                            del deps_mod['case']
                            del deps_root['nmod:agent']
                            subj_tag = words[mod - 1][1]['PartOfSpeech']
                            subj_word = words[mod - 1][0]
                            final_subj = {}
                            build(mod, deps_mod, aux, words, final_subj)
                            final_obj = {}
                            build(subj, deps_subj, aux, words, final_obj)
                            final_root = {}
                            build(root, deps_root, aux, words, final_root, False)
                            sentence1 = self.generation.print_sentence_voice(
                                final_subj, final_obj, words[root - 1][0], v_aux,
                                aux_tense, subj_tag, subj_word, final_root)
                            s1 = self.transformation(sentence1, ant)
                            return True, s1
                        else:
                            return False, ant
                else:
                    return False, ant
            else:
                return False, ant

        ## MAIN OF TRANSFORMATION
        ## control recursion: check whether there is no simplification to be done
        if sent == ant:
            return sent
        flag = False
        ant = sent

        ## parser
        try:
            parsed = self.parser.process(sent)
        except AssertionError:
            return ant

        ## data structure for the words and POS
        words = parsed['words']
        ## data structure for the dependency parser
        dict_dep = self.parser.transform(parsed)

        ## check whether or not the sentence has a root node
        if 0 not in dict_dep:
            return ant
        root = dict_dep[0]['root'][0]

        ## check for root dependencies
        if root not in dict_dep:
            return ant
        deps_root = dict_dep[root]

        ## get tokens
        sent_tok = []
        for w in words:
            sent_tok.append(w[0])

        ## dealing with questions
        ## TODO: improve this control with parser information.
        if sent_tok[0].lower() in ("what", "where", "when", "whose", "who",
                                   "which", "whom", "whatever", "whatsoever",
                                   "whichever", "whoever", "whosoever",
                                   "whomever", "whomsoever", "whoseever",
                                   "whereever") and sent_tok[-1] == "?":
            return ant

        ## deal with apposition
        flag, simpl = appositive_phrases(dict_dep, words, root, deps_root, ant)
        if flag:
            return simpl

        ## analyse whether or not a sentence has simplification clues
        ## (in this case, discourse markers or relative pronouns)
        a = Analysis(sent_tok, self.cc, self.relpron)
        flag_cc, type_cc = a.analyse_cc()

        ## if the sentence has a marker that requires attention
        if flag_cc:
            ## sorting according to the order of the relations
            rel = {}
            for k in deps_root.keys():
                if 'conj' in k or 'advcl' in k:
                    others = sorted(deps_root[k], reverse=True)
                    cnt = 0
                    for o in others:
                        deps_root[k + str(cnt)] = []
                        deps_root[k + str(cnt)].append(o)
                        rel[k + str(cnt)] = deps_root[k][0]
                        cnt += 1
                    del deps_root[k]
            sorted_rel = sorted(rel.items(), key=operator.itemgetter(1))
            for k in sorted_rel:
                flag, simpl = conjoint_clauses(dict_dep, words, root, deps_root,
                                               ant, type_cc, k[0])
                if flag:
                    return simpl

        flag_rc, type_rc = a.analyse_rc()
        ## if the sentence has a relative pronoun
        if flag_rc:
            ## check where the dependency of the relative clause is
            if 'nsubj' in deps_root:
                flag, simpl = relative_clauses(dict_dep, words, root, deps_root,
                                               ant, 'nsubj')
                if flag:
                    return simpl
            elif 'dobj' in deps_root:
                flag, simpl = relative_clauses(dict_dep, words, root, deps_root,
                                               ant, 'dobj')
                if flag:
                    return simpl

        ## deal with passive voice
        flag, simpl = passive_voice(dict_dep, words, root, deps_root, ant)
        if flag:
            return simpl

        ## return the original sentence if no simplification was done
        if flag == False:
            return ant

    def simplify(self):
        """
        Call the simplification process for all sentences in the document.
        """
        simp_sentences = []
        for s in self.sentences:
            simp_sentences.append(self.transformation(s, ''))
        return simp_sentences
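# Minimal usage sketch for the Simplify class above. It assumes a plain-text
# input file with one sentence per line and that Parser, Generation, and
# Analysis are importable from the surrounding package; none of this wiring is
# shown in the original source.
# simplifier = Simplify('document.txt')
# for simplified in simplifier.simplify():
#     print(simplified)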
        check=True,
    )

    # reattach audio to the newly generated video
    subprocess.run(
        "ffmpeg -i {} -i {} -map 0:0 -map 1:0 -vcodec copy -acodec copy -y {}".format(
            os.path.join(mount_dir, video_name, video_without_audio),
            os.path.join(mount_dir, video_name, "audio.aac"),
            os.path.join(mount_dir, video_name, video_with_audio),
        ),
        shell=True,
        check=True,
    )

    # remove temp video without audio
    os.remove(os.path.join(mount_dir, video_name, video_without_audio))


if __name__ == "__main__":
    parser = Parser()
    parser.append_postprocess_args()
    args = parser.return_args()

    assert args.video_name is not None
    assert args.storage_mount_dir is not None

    postprocess(
        mount_dir=args.storage_mount_dir,
        video_name=args.video_name
    )
        if i > 0:
            if queue_limit:
                condition = i % batch_size == 0 or i == queue_limit - 1
            else:
                condition = i % batch_size == 0 or i == file_count - 1
            if condition:
                bus_service.send_queue_message_batch(queue, msg_batch)
                msg_batch = []
    return file_count


if __name__ == "__main__":
    parser = Parser()
    parser.append_add_images_to_queue_args()
    args = parser.return_args()

    assert args.namespace is not None
    assert args.queue is not None
    assert args.sb_key_name is not None
    assert args.sb_key_value is not None
    assert args.storage_mount_dir is not None
    assert args.video_name is not None

    # setup logger
    handler_format = get_handler_format()
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(handler_format)
    logger = logging.getLogger("root")
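# Self-contained illustration of the flush condition above: a batch is sent
# every `batch_size` items, with a final flush for the remainder at the last
# index (queue_limit caps the count when set).
def batch_flush_indices(file_count, batch_size, queue_limit=None):
    last = (queue_limit or file_count) - 1
    flushes = []
    for i in range(last + 1):
        if i > 0 and (i % batch_size == 0 or i == last):
            flushes.append(i)
    return flushes

print(batch_flush_indices(10, 4))  # [4, 8, 9]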
from util import Parser as parser

print(parser.calculate_molar_mass(parser.get_elements('CaCl2'), 'CaCl2'))
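# Illustrative check of the value the call above should produce, using assumed
# standard atomic masses; the util.Parser implementation itself is not shown
# in the source.
masses = {'Ca': 40.078, 'Cl': 35.453}
molar_mass_cacl2 = masses['Ca'] + 2 * masses['Cl']
print(molar_mass_cacl2)  # 110.984 g/mol for CaCl2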
   directory.
2. Merge previously recorded bags from the specified directory.

By default, all the topics defined in your project's 'topics' file will be
recorded/merged if no arguments are specified. Otherwise, only the topics
specified will be recorded/merged.
"""

import os
import sys

from util import Parser, TopicList

__author__ = "Anass Al-Wohoush"
__version__ = "1.2.0"

if __name__ == "__main__":
    try:
        topics_path = os.environ["TOPICS_PATH"]
    except KeyError:
        print("E: TOPICS_PATH environment variable not set")
        sys.exit(2)

    topics = TopicList(topics_path)
    args = Parser(topics, __doc__, __version__)
    status = args.cmd(
        topics=args.enabled, name=args.name,
        dir=args.dir, args=args.raw).run()
    sys.exit(status)