def resolve_first_degree(left, right):
    # Split each side into its numeric terms and its unknown (x) terms
    left_numbers, left_x = utils.extract_numbers(left, 1)
    left_numbers = utils.clean_list(left_numbers, 'left')
    left_x = utils.clean_vars(left_x, 'left')
    right_numbers, right_x = utils.extract_numbers(right, 1)
    right_numbers = utils.clean_list(right_numbers, 'right')
    right_x = utils.clean_vars(right_x, 'right')
    # Reduce both sides to a single coefficient per power of x
    unknowns_cleaned, numbers_cleaned = utils.final_clean(
        left_x, right_x, left_numbers + right_numbers, 1)
    final_solver_first_degree(numbers_cleaned, unknowns_cleaned[1])
def resolve_second_degree(left, right):
    left_numbers, left_x = utils.extract_numbers(left, 2)
    left_numbers = utils.clean_list(left_numbers, 'left')
    left_x = utils.clean_vars(left_x, 'left')
    right_numbers, right_x = utils.extract_numbers(right, 2)
    right_numbers = utils.clean_list(right_numbers, 'right')
    right_x = utils.clean_vars(right_x, 'right')
    unknowns_cleaned, numbers_cleaned = utils.final_clean(
        left_x, right_x, right_numbers + left_numbers, 1)
    # With a zero quadratic coefficient the equation is really first degree
    if unknowns_cleaned[2] == 0:
        final_solver_first_degree(numbers_cleaned, unknowns_cleaned[1])
    else:
        final_solver_second_degree(unknowns_cleaned[2], unknowns_cleaned[1],
                                   numbers_cleaned)
    return 1
def resolve_zero_degree(left, right):
    left_numbers = utils.extract_numbers(left, 0)
    left_numbers = utils.clean_list(left_numbers, 'left')
    right_numbers = utils.extract_numbers(right, 0)
    right_numbers = utils.clean_list(right_numbers, 'right')
    left_numbers = utils.final_clean({}, {}, left_numbers, 0)
    right_numbers = utils.final_clean({}, {}, right_numbers, 0)
    right_numbers *= -1
    # A degree-zero equation is either an identity or a contradiction
    if right_numbers == left_numbers:
        print(str(left_numbers) + ' * X^0 = ' + str(right_numbers) + ' * X^0')
        print('The solution is:')
        print('All real numbers')
    else:
        print(str(left_numbers) + ' is not equal to ' + str(right_numbers))
        print('There is no solution.')
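# Minimal dispatch sketch (not part of the original module): assumes the caller
# has already split the equation on '=' and determined its highest power of x.
# The `resolve` wrapper and its `degree` argument are illustrative assumptions.
def resolve(left, right, degree):
    if degree == 0:
        resolve_zero_degree(left, right)
    elif degree == 1:
        resolve_first_degree(left, right)
    elif degree == 2:
        resolve_second_degree(left, right)
    else:
        print('Degrees greater than 2 are not handled here.')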
def data_from_csv(self, filename):
    """Read a CSV file and return its rows as cleaned lists of values."""
    rows = []
    with open(filename, 'rU') as f:
        for line in f:
            row = line.split(',')
            row = clean_list(row)
            rows.append(row)
    return rows
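# Hypothetical usage sketch, not part of the original code: 'data.csv' is an
# assumed sample file. Because the method never touches `self`, it can be
# exercised directly as a plain function for a quick check.
rows = data_from_csv(None, 'data.csv')
for row in rows[:3]:
    print(row)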
def write_text(event_file):
    """Takes an event file as input, writes the text from all queries
    contained in the event file to TEXT_DIR, and returns a completion
    message for the event file."""
    for line in open(event_file):
        query = line.strip()
        logger.info('Writing query from %s: "%s"' % (current_process(), query))
        qi = QueryIterator('http://search-s11.prod.wikia.net:8983/solr/main/',
                           {'query': query,
                            'fields': 'id,wid,html_en,indexed',
                            'sort': 'id asc'})
        for doc in qi:
            # Sanitize and write text
            text = '\n'.join(clean_list(doc.get('html_en', '')))
            localpath = os.path.join(TEXT_DIR, doc['id'])
            logger.debug('Writing text from %s to %s' % (doc['id'], localpath))
            with open(localpath, 'w') as f:
                f.write(text)
    return 'Finished event file %s' % event_file
k.set_contents_from_filename(tar_file)
os.remove(tar_file)
# send post requests for each wid covered in this batch
for wid in wids:
    requests.post('http://nlp-s1:5000/wiki/%i' % wid)
wids = []
batch_count += 1
dest_dir = ensure_dir_exists(
    TEXT_DIR + '%s_%i' % (os.path.basename(qqfile), batch_count))
# open tarball for writing
tar_file = dest_dir + '.tgz'
tar = tarfile.open(tar_file, 'w:gz')

wid = int(doc['wid'])
if wid not in wids:
    wids.append(wid)
# sanitize and write text
text = '\n'.join(clean_list(doc.get('html_en', '')))
localpath = os.path.join(dest_dir, doc['id'])
with open(localpath, 'w') as f:
    f.write(text)
# add text file to tarball
tar.add(localpath, doc['id'])
doc_count += 1

# tar the final batch and send to aws
if tar:
    tar.close()
    shutil.rmtree(dest_dir)
    if aws:
        k.key = 'text_events/%s' % os.path.basename(tar_file)
        k.set_contents_from_filename(tar_file)
        os.remove(tar_file)
    # send post requests for each wid covered in the final batch
    for wid in wids:
        requests.post('http://nlp-s1:5000/wiki/%i' % wid)
    'I-ARG0': 4,
    'I-ARG1': 4,
    'I-ARG2': 4,
    'I-ARG3': 4
}
number_of_num = len(set(labeles_to_num.values()))
empty_tag = [labeles_to_num['O'] for i in range(max_len)]

sents_srl_features = {}
for sent in pos_sents_srl:
    srl = pos_sents_srl[sent]
    k = 0
    srl_features = []
    for v in srl['verbs']:
        # Keep only the important SRL tags, truncate/pad to max_len,
        # then map each tag to its numeric label
        tags = utils.clean_list(v['tags'], important_tags, 'O')
        tags = tags[:max_len] + ['pad' for i in range(max_len - len(tags))]
        tags_num = [labeles_to_num[i] for i in tags]
        srl_features.append(tags_num)
        k += 1
        if k == number_of_verbs:
            break
    # Pad with empty tag rows if the sentence has fewer verbs than expected
    srl_features = srl_features + [
        empty_tag for i in range(number_of_verbs - len(srl_features))
    ]
    sents_srl_features[sent] = tf.keras.utils.to_categorical(
        np.array(srl_features), num_classes=number_of_num)
    # Flatten to (30, number_of_num * number_of_verbs); the hard-coded 30
    # implies max_len == 30
    sents_srl_features[sent] = sents_srl_features[sent].reshape(
        30, number_of_num * number_of_verbs)

### parse sentences
def add_each_matching_item_to_results(items, search_terms,
                                      interaction_mappings={}):
    '''
    Will add each item with a field value that matches a search term to the
    results, and will add the matched values to the matches.
    '''
    results = []
    matches = []
    studies_to_be_checked = []
    phenotypes_to_be_checked = []
    genotypes_to_be_checked = []
    panels_to_be_checked = []
    # 'study_id_to_interaction_id_mapping' is filled when searching through
    # interactions. Having that dict saves a for-loop later on in the template.
    study_id_to_interaction_id_mapping = {}
    for item in items:
        if type(item) == Interaction:
            # Make sure every item referenced by the interaction is checked to
            # see if it should be included in the search result
            studies_to_be_checked.append(item.study)
            if item.study.id not in study_id_to_interaction_id_mapping:
                study_id_to_interaction_id_mapping[item.study.id] = []
            study_id_to_interaction_id_mapping[item.study.id].append(item.id)
            phenotypes_to_be_checked.append(
                interaction_mappings[item.id]['phenotypes'])
            genotypes_to_be_checked.append(
                interaction_mappings[item.id]['genotypes'])
            panels_to_be_checked.append(
                interaction_mappings[item.id]['panels'])
        else:
            if not type(item) == Study:
                if item.study.id not in study_id_to_interaction_id_mapping:
                    study_id_to_interaction_id_mapping[item.study.id] = []
                study_id_to_interaction_id_mapping[item.study.id].append(item.id)
        # Group field names and values together
        list_of_name_value_pairs = [
            (field.name, getattr(item, field.name))
            for field in item._meta.fields]
        for li in list_of_name_value_pairs:
            if not (li[0] == 'id' or li[0] == 'study'):
                # Lowercase the value; handle unicode strings explicitly, since
                # str() on a non-ASCII unicode string raises an error
                if isinstance(li[1], str) or isinstance(li[1], unicode):
                    val = li[1].lower()
                else:
                    val = str(li[1]).lower()
                for term in search_terms:
                    if term in val:
                        # A search term has been found in the field value
                        results.append(item)
                        matches.append(val)
                        if type(item) == Interaction:
                            # Add the study, genotype, phenotype and panel
                            # related to this interaction
                            if item.study is not None:
                                results.append(item.study)
                            if len(item.phenotypes.all()) != 0:
                                results.append(item.phenotypes.all()[0])
                            if len(item.genotypes.all()) != 0:
                                results.append(item.genotypes.all()[0])
                            if len(item.panels.all()) != 0:
                                results.append(item.panels.all()[0])
    return {'results': results,
            'matches': matches,
            'studies_to_be_checked': utils.clean_list(studies_to_be_checked),
            'phenotypes_to_be_checked': utils.clean_list(phenotypes_to_be_checked),
            'genotypes_to_be_checked': utils.clean_list(genotypes_to_be_checked),
            'panels_to_be_checked': utils.clean_list(panels_to_be_checked),
            'study_id_to_interaction_id_mapping': study_id_to_interaction_id_mapping}