def enhance_symbol_flags(self):
    """Annotate every function, file and folder with a boolean
    "calls_float_function" flag marking (transitive, for folders) use of
    software floating-point support routines.
    """
    # NOTE(review): due to `|` precedence this matches either
    # `^__aeabi_(f.*|.*2f)` or exactly `__addsf3` (match() anchors at the
    # start, so the missing outer group is harmless here) — confirm the
    # intent was not `^(__aeabi_(f.*|.*2f)|__addsf3)$`.
    is_float_function_pattern = re.compile(r"^__aeabi_(f.*|.*2f)|__addsf3$")

    def is_float_function_name(n):
        # truthy re.Match or None; used purely as a predicate below
        return is_float_function_pattern.match(n)

    # all function records whose symbol name looks like an EABI float helper
    float_functions = [
        f for f in self.all_functions() if is_float_function_name(f[NAME])
    ]
    # a function calls float code iff any float helper appears in its callees
    # (membership test of the whole function record — assumes CALLEES holds
    # the same record objects; TODO confirm)
    for f in self.all_functions():
        callees = f[CALLEES]
        f["calls_float_function"] = any(
            [ff in callees for ff in float_functions])
    # a file calls float code iff any of its functions does
    for file in self.all_files():
        file["calls_float_function"] = any(
            [f["calls_float_function"] for f in file[FUNCTIONS]])

    def folder_calls_float_function(folder):
        """Recursively flag *folder*; True if it or any descendant calls float code."""
        result = any([f["calls_float_function"] for f in folder[FILES]])
        # deliberately no short-circuit: every sub-folder must be visited so
        # its own flag gets written
        for sub_folder in folder[SUB_FOLDERS]:
            if folder_calls_float_function(sub_folder):
                result = True
        folder["calls_float_function"] = result
        return result

    for folder in self.root_folders():
        folder_calls_float_function(folder)
def __filter_inventory(self, inventory):
    """Keep only summer-camp / summer-term inventory descriptors,
    de-duplicated by (truncated) descriptor, then hand them to
    __split_inventory for further processing.
    """
    camp_markers = (
        'Daily Summer Camp',
        '10 Week Summer Coding Term',
        'Summer Camps \'20 - Weekly Registration',
    )
    keepers = []
    for entry in inventory:
        # only the part before the first '[' decides whether we keep it
        label = entry['descriptor'].split('[')[0]
        if not any(marker in label for marker in camp_markers):
            continue
        variant = entry['descriptor']
        if len(variant.split(',')) > 3:
            # long descriptors are de-duplicated on everything before the
            # last comma
            prefix = variant[0:variant.rfind(',')]
            if not any(prefix in kept for kept in keepers):
                keepers.append(variant)
        elif not any(variant in kept for kept in keepers):
            keepers.append(variant)
    return self.__split_inventory(keepers)
def check_pw_reqs(pw, length=None):
    """Check if the password includes lower letters, capital letters,
    numbers and special characters (Z85 alphabet); only the first
    *length* characters are considered when given."""
    z85_special_chars = ".-:+=^!/*?&<>()[]{}@%$#"
    candidate = pw[:length]
    has_lower = builtins.any(c.islower() for c in candidate)
    has_upper = builtins.any(c.isupper() for c in candidate)
    has_digit = builtins.any(c.isdigit() for c in candidate)
    has_special = builtins.any(c in z85_special_chars for c in candidate)
    return has_lower and has_upper and has_digit and has_special
def _check_signature(filename): """Check the filetype by its signature (the leading part of the file). If the first several characters are all ASCII, return the string ``TXT``. Returns ------- str Either a short string identifying the filetype (currently "HDF5", "DOS Format" or "TXT") OR `None` if the type is unknown. """ file_signatures = {b'\x89\x48\x44\x46\x0d\x0a\x1a\x0a':'HDF5', b'DOS Format':'DOS Format', b'\x11Varian':'Cary UV'} max_sig_length = max(list(map(len,list(file_signatures.keys())))) with open(filename,'rb') as fp: inistring = fp.read(max_sig_length) if any(thiskey in inistring for thiskey in list(file_signatures.keys())):# this will fail without overriding the numpy any( above retval = file_signatures[next((thiskey for thiskey in list(file_signatures.keys()) if thiskey in inistring))] logger.debug(strm("Found magic signature, returning", retval)) return retval else: try: inistring.decode('ascii') return 'TXT' except UnicodeDecodeError: # if it failed, it's because the string is not ASCII return None
def folder_calls_float_function(folder):
    """Recursively set folder["calls_float_function"]; return True when the
    folder, or any folder below it, contains a file that calls float code."""
    # recurse first so every sub-folder gets its flag written — no
    # short-circuiting allowed here
    child_flags = [folder_calls_float_function(sub)
                   for sub in folder[SUB_FOLDERS]]
    flag = any(f["calls_float_function"] for f in folder[FILES]) or any(child_flags)
    folder["calls_float_function"] = flag
    return flag
def __contains__(self, path):
    """Match element nodes whose attribute value ends with one of our suffixes."""
    node = path[-1]
    if not isinstance(node, xsc.Element):
        return False
    attr = node.attrs.get(self.attrname)
    if attr.isfancy():
        # if there are PIs, say no
        return False
    value = str(attr)
    return builtins.any(value.endswith(suffix) for suffix in self.attrvalues)
def __contains__(self, path):
    """Match element nodes whose "class" attribute contains one of our class names."""
    node = path[-1]
    if not isinstance(node, xsc.Element):
        return False
    attr = node.attrs.get("class")
    if attr.isfancy():
        # fancy values (PIs etc.) are undecidable -> treat as no match
        return False
    tokens = str(attr).split()
    return builtins.any(name in tokens for name in self.classnames)
def nfunc(*args, **kwargs):
    """Dispatch to map_blocks for chunked array inputs, else call func directly."""
    chunked = builtin.any(hasattr(a, 'chunks') for a in args)
    if not chunked:
        return func(*args, **kwargs)
    # chunked path: resolve the output dtype from the first argument,
    # optionally remapped through the dtypes table
    if dtypes is None:
        kwargs['dtype'] = args[0].dtype
    else:
        kwargs['dtype'] = np.dtype(dtypes[str(args[0].dtype)])
    return map_blocks(func, *args, **kwargs)
def any(iterable, pred):
    """Returns True if ANY element in the given iterable is True for the given pred function"""
    warnings.warn(
        "pipe.any is deprecated, use the builtin any(...) instead.",
        DeprecationWarning,
        stacklevel=4,
    )
    # delegate to the real builtin, mapping the predicate lazily
    return builtins.any(map(pred, iterable))
def __call__(self, request):
    # Code to be executed for each request before
    # the view (and later middleware) are called.
    #
    # NOTE(review): the view runs first and its response is discarded when
    # the user turns out to be anonymous — confirm this is intentional
    # (checking before get_response would avoid executing the view at all).
    response = self.get_response(request)
    # NOTE(review): on modern Django is_authenticated is a property, not a
    # method; this call syntax implies Django < 1.10 — confirm version.
    if not request.user.is_authenticated():
        path = request.path_info.lstrip('/')
        # unauthenticated users may still reach URLs matching an exempt pattern
        if not any(m.match(path) for m in EXEMPT_URLS):
            return HttpResponseRedirect(settings.LOGIN_URL)
    return response
def matching(commit, parents, blob_sha):
    """Return the path match for *commit*, but only when none of its
    parents also match (i.e. the commit introduced the blob)."""
    res = commit_matching_path(commit, blob_sha)
    if not res:
        return None
    # short-circuit: the first matching parent disqualifies the commit
    for parent in parents:
        if commit_matching_path(parent, blob_sha):
            return None
    return res
def wrapper(*args, **kwargs):
    """Dispatch to the sparse (sfunc) or dense (dfunc) implementation of
    *func* based on the positional argument types.

    Raises TypeError when STensor and DTensor arguments are mixed, or when
    a sparse call is made but no sparse implementation exists.
    """
    # classify the positional arguments by tensor kind
    is_sparse = builtins.any([isinstance(x, STensor) for x in args])
    is_dense = builtins.any([isinstance(x, DTensor) for x in args])
    if (is_sparse and is_dense):
        # NOTE(review): the backslash continuation embeds the next line's
        # leading whitespace inside the message — confirm that is intended
        raise TypeError(
            f"Parameters of `st.{func.__name__}` should be all STensor \
                or all DTensor.")
    if is_sparse:
        if sfunc is None:
            raise TypeError(f"`st.{func.__name__}` doesn't support \
                STensor parameters.")
        # unwrap STensor arguments to their raw data before calling sfunc
        args = tuple(x._data if isinstance(x, STensor) else x for x in args)
        return _ensure_tensor(sfunc(*args, **kwargs))
    else:
        # unwrap DTensor arguments and call the dense implementation
        args = tuple(x._data if isinstance(x, DTensor) else x for x in args)
        return _ensure_tensor(dfunc(*args, **kwargs))
def test_is_number_positive():
    """Exercise lesson.is_number_positive on negatives, positives and zero."""
    try:
        from lesson import is_number_positive
    except ImportError:
        _import_error('is_number_positive')
        return False
    negatives = [is_number_positive(n) for n in [-1, -2, -3, -4, -5]]
    positives = [is_number_positive(n) for n in [1, 2, 3, 4, 5]]
    return (not any(negatives)
            and all(positives)
            and is_number_positive(0) == "Neither")
def any(f, lst):
    """Returns `true` if at least one of the elements of the list match the
    predicate, `false` otherwise.

    Args:
        f (function): The predicate function.
        lst (list): The list to consider.

    Returns:
        bool: `true` if the predicate is satisfied by at least one element,
            `false` otherwise.
    """
    return builtins.any(f(element) for element in lst)
def contains(self, keywords: list, all_colnames: list = None) -> list:
    """Check which keywords occur as substrings of any column name.

    The search is case sensitive. When *all_colnames* is omitted the
    column names collected by the run method (self.all_colnames) are used.

    Args:
        keywords: key words the user is interested in.
        all_colnames: column names of one or many tables (optional).

    Returns:
        One bool per keyword: True when the keyword is a substring of at
        least one column name, otherwise False.
    """
    names = self.all_colnames if all_colnames is None else all_colnames
    return [any(keyword in colname for colname in names)
            for keyword in keywords]
def can_take_course(self, list_of_courses_taken, current_term_courses, course):
    # TODO Throw errors in the future
    """
    Check if the course violates any prereq, coreq, and antireq requirements;
    return a boolean along with any error message.
    :param list_of_courses_taken: list[str] -- courses completed in prior terms
    :param current_term_courses: list[str] -- courses enrolled this term
    :param course: str -- the course being validated
    :return: Bool, str -- NOTE(review): the eval-failure path returns a bare
        True (no message) — confirm callers tolerate both shapes.
    """
    # ANTIREQ
    # NOTE(review): substring test (c in anti_req), not equality — presumably
    # antireq entries are compound strings; verify this matches the data format.
    for anti_req in self.antireqs:
        if any(c in anti_req for c in list_of_courses_taken + current_term_courses):
            return False, "The course has an antirequisite."
    # Course cannot be repeated
    if course in list_of_courses_taken:
        return False, "Course has already been taken."
    # PREREQ & COREQ
    # build up a boolean expression string by substituting each prereq course;
    # entries prefixed with "_" may also be satisfied by current-term courses
    prereq_logic = self.prereq_logic
    for i in range(len(self.prereq_courses)):
        if self.prereq_courses[i][0] == "_":
            prereq_logic = self.level_can_take(prereq_logic, self.prereq_courses[i][1:], list_of_courses_taken + current_term_courses, i)
        else:
            prereq_logic = self.level_can_take(prereq_logic, self.prereq_courses[i], list_of_courses_taken, i)
    try:
        # NOTE(review): eval() on a constructed logic string — safe only while
        # prereq_logic is generated internally; verify no external input can
        # reach it.
        if eval(prereq_logic):
            return True, ""
        else:
            return False, "Prerequisite or corequisite not met."
    except Exception as e:
        # EMAIL(course, self.prereq_courses, self.prereq_logic, list_of_courses_taken, current_term_courses, e) # Error Log
        # TODO: Prevent sending multiple emails with the same error in a short amount of time
        # best-effort error report: collect the full validation context and
        # email it to the host user
        error_message = "Error Message: " + str(e) + "."
        error_message += "\n\ncan_take_course({}, {}, {})".format(list_of_courses_taken, current_term_courses, course)
        error_message += "\n\nAntireqs: " + str(self.antireqs)
        error_message += "\n\nPrereq Logic: " + str(self.prereq_logic)
        error_message += "\n\nPrereq Courses: " + str(self.prereq_courses)
        error_message += "\n\nCoreq Logic: " + str(self.coreq_logic)
        error_message += "\n\nCoreq Courses: " + str(self.coreq_courses)
        error_message += "\n\nOccurred at: " + str(datetime.now()) + " (UTC)"
        msg = EmailMessage("Error in ValidationCheckAPI/CanTakeCourse", error_message, settings.EMAIL_HOST_USER, [settings.EMAIL_HOST_USER])
        msg.send()
        # NOTE(review): bare True, not (True, "") — inconsistent with every
        # other return; confirm intended (fail-open on evaluation errors).
        return True
def wrapped(message):
    """Predicate: does the message invoke one of *cmnds* (optionally with
    the bot-name suffix)? Inline mode searches the whole message."""
    if not message.text:
        return False
    tokens = re.split(r'[^\w@/]', message.text.lower())
    if inline:
        return any(cmnd in tokens or cmnd + my_bot_name in tokens
                   for cmnd in cmnds)
    # non-inline: only the first token may carry the command
    head = tokens[0]
    if head in cmnds:
        return True
    return head.endswith(my_bot_name) and head.split('@')[0] in cmnds
def wrapped(message):
    """Predicate matching a command either at the start of the message
    (non-inline) or anywhere in it (inline), with or without bot-name suffix."""
    text = message.text
    if not text:
        return False
    parts = re.split(r'[^\w@/]', text.lower())
    if not inline:
        first = parts[0]
        plain_hit = first in cmnds
        mention_hit = first.endswith(my_bot_name) and first.split('@')[0] in cmnds
        return plain_hit or mention_hit
    for cmnd in cmnds:
        if cmnd in parts or cmnd + my_bot_name in parts:
            return True
    return False
def sequential(self, *args, **kwargs):
    """Apply self.pyfunc element-wise across the broadcast shape of *args*
    (numpy.vectorize-style), allocating the output array(s) lazily from the
    first result; returns a single array when there is one output, else a
    tuple of arrays.
    """
    broadcast_shape, dim_sizes, args = self._process_args(args, kwargs)
    outputs = None
    otypes = self.otypes
    nout = len(self._out)
    for index in np.ndindex(*broadcast_shape):
        # pick the element at `index` for vectorized args; pass scalars
        # (where _in is None) through unchanged
        i_args = ((arg[index] if _in is not None else arg)
                  for _in, arg in zip(self._in, args))
        if self.sendindex:
            # presumably lets pyfunc know its position in the grid — confirm
            results = self.pyfunc(index, *i_args)
        else:
            results = self.pyfunc(*i_args)
        n_results = len(results) if isinstance(results, tuple) else 1
        if nout != n_results:
            raise ValueError(
                'wrong number of outputs from pyfunc: expected %r, got %r'
                % (nout, n_results))
        if nout == 1:
            # normalize to a tuple so the zip below works uniformly
            results = (results, )
        if outputs is None:
            # first call: learn output core-dim sizes and dtypes, then
            # allocate the output arrays
            for result, core_dims in zip(results, self._out):
                _update_dim_sizes(dim_sizes, result, core_dims)
            if otypes is None:
                otypes = [np.asarray(result).dtype for result in results]
            outputs = _create_arrays(broadcast_shape, dim_sizes,
                                     self._out, otypes)
        for output, result in zip(outputs, results):
            output[index] = result
    if outputs is None:
        # did not call the function even once (size-0 broadcast shape)
        if otypes is None:
            raise ValueError('cannot call `vectorize` on size 0 inputs '
                             'unless `otypes` is set')
        if builtins.any(dim not in dim_sizes
                        for dims in self._out for dim in dims):
            raise ValueError('cannot call `vectorize` with a signature '
                             'including new output dimensions on size 0 '
                             'inputs')
        outputs = _create_arrays(broadcast_shape, dim_sizes,
                                 self._out, otypes)
    return outputs[0] if nout == 1 else outputs
def get_articles(members, level, max_level):
    """Recursively crawl Wikipedia category members up to *max_level*,
    persisting categories via cat_DAO and artist articles via art_DAO,
    while appending a crawl trace to the levels log file.

    :param members: mapping of category members (wikipediaapi pages)
    :param level: current recursion depth
    :param max_level: maximum depth to descend into sub-categories
    """
    log_path = "/home/renzo/Artists-data/levels_singers.txt"
    # fix: the original `open(...).write(...)` calls leaked file handles;
    # context managers close them deterministically
    with open(log_path, "a") as log:
        log.write(" " + str(level) + "\n")
    for c in members.values():
        if c.ns == wikipediaapi.Namespace.CATEGORY and not any(
                x in c.title for x in exclude_cats):
            if level < max_level:
                # revisit categories that are new or were left mid-crawl
                if cat_DAO.exists(c.fullurl) is False or cat_DAO.find(
                        c.fullurl)['status'] == 'visiting':
                    wiki_cat = WikiCategory(c, "visiting")
                    cat_DAO.insert(wiki_cat)
                    print("visiting: %s" % (c.fullurl))
                    with open(log_path, "a") as log:
                        log.write(str(c.fullurl))
                    get_articles(c.categorymembers,
                                 level=level + 1,
                                 max_level=max_level)
                    # mark the category done only after the subtree finished
                    wiki_cat.status = "visited"
                    cat_DAO.replace(wiki_cat)
        elif c.ns == wikipediaapi.Namespace.MAIN and not any(
                x in c.title for x in exclude_pages):
            art_DAO.insert(WikiArtist(c))
async def any(itr: AnyIterable[MaybeAwaitable[Any]]) -> bool:
    """
    Return True if any value is truthy in a mixed iterable, else False.

    The iterable will be fully consumed and any awaitables will
    automatically be awaited.

    Example:

        if await any(it):
            ...

    """
    resolved = await ait_asyncio.gather_iter(itr)
    return builtins.any(resolved)
async def zip(*itrs: AnyIterable[Any]) -> AsyncIterator[Tuple[Any, ...]]:
    """
    Yield items in lockstep from mixed iterables until the shortest one is
    consumed.

    Example::

        async for a, b, c in zip(i, j, k):
            ...

    """
    streams: List[AsyncIterator[Any]] = [iter(itr) for itr in itrs]
    while True:
        pending = [stream.__anext__() for stream in streams]
        values = await asyncio.gather(*pending, return_exceptions=True)
        # any exhausted stream (StopIteration/StopAsyncIteration) ends the zip
        exhausted = builtins.any(isinstance(v, AnyStop) for v in values)
        if exhausted:
            break
        yield values
def get_line_num_dict(self):
    """Build self.line_num_dict: for each network key, the header-adjusted
    line numbers in key_file whose row contains that key's search term."""
    result = {}
    for key, search_term in network_dict.items():
        matches = []
        with open(self.key_file) as fh:
            rows = csv.reader(fh, delimiter=',')
            for row_no, row in enumerate(rows, start=1):
                # substring match against any cell of the row
                if any(search_term in cell for cell in row):
                    matches.append(row_no - self.header_size)
        result[key] = matches
    self.line_num_dict = result
def extract_assertion(para_dict, coref_info):
    """Collect, per character, the segments that contain one of their mentions.

    Args:
        para_dict: paragraph ID -> (segmented sentences, segment ranges)
            for that paragraph.
        coref_info: coreference output; its 'clusters' entries may carry a
            canonical 'name' and a list of 'mentions' with 'position' spans.

    Returns:
        dict keyed by canonical character name; each value is a list of
        dicts with 'position' (the segment range) and 'text' (the stripped
        segment).
    """
    char_dict = defaultdict(list)
    for para_id, (segments, segment_ranges) in para_dict.items():
        for segment_id, segment in segments.items():
            segment_range = segment_ranges[segment_id]
            for cluster in coref_info['clusters']:
                # unnamed clusters cannot be attributed to a character
                if 'name' not in cluster:
                    continue
                character = cluster['name']
                positions = [m['position'] for m in cluster['mentions']]
                # a mention counts when it lies entirely inside the segment
                if any(segment_range[0] <= pos[0] and pos[1] <= segment_range[1]
                       for pos in positions):
                    char_dict[character].append({
                        'position': segment_range,
                        'text': segment.strip(),
                    })
    return char_dict
def crawl_and_process(out_file, input_dir):
    """
    Rebuild the whoosh index for the given directory.

    Walks the input directory, converts md and ipynb files to html, then
    regenerates the index into *out_file*.
    """
    skip_words = ['reveal', 'ai_py']
    for candidate in get_file_names(input_dir):
        # bypass reveal.js slide exports and ai_py artifacts
        if builtins.any(word in candidate for word in skip_words):
            continue
        _, file_ext = os.path.splitext(candidate)
        prep_html(file_ext, candidate)
    # Regenerate the index
    gen_index(input_dir, out_file)
def push(self, list_to_push):
    """Append new (non-blacklisted) URLs, de-duplicate the queue, order it
    current-domain-first, and rotate off the current domain when it has no
    URLs left."""
    cleaned = self.blacklisted_urls(list_to_push)
    self.url_list.extend(cleaned)
    # de-duplicate while keeping first-seen order
    seen = set()
    deduped = []
    for url in self.url_list:
        if url not in seen:
            seen.add(url)
            deduped.append(url)
    self.url_list = deduped
    # current-domain URLs go to the front of the queue
    domain_re = re.escape(self.current_domain)
    off_domain = [
        u for u in self.url_list
        if not re.findall(domain_re, urlparse(u).netloc)
    ]
    on_domain = [u for u in self.url_list if u not in off_domain]
    self.url_list = on_domain + off_domain
    # nothing left on the current domain -> mark it parsed and reset
    if not any(self.current_domain in urlparse(u).netloc
               for u in self.url_list):
        self.parsed_domains.append(self.current_domain)
        self.current_domain = ''
    self.logger.info(f'Pushed list. Remaining {len(self.url_list)}')
def run(self):
    """Download and parse self.url; store title/text/image on the matching
    tweet document, dropping it for social-media links and duplicates."""
    try:
        print('Parse ' + self.url)
        article = Article(self.url)
        article.download()
        # if article.download_exception_msg and "404" in article.download_exception_msg:
        #     logger.error('404 not found, delete... ' + self.url)
        #     news_collection.remove({"id": self.tweet_id})
        #     return
        # if article.download_exception_msg and "410" in article.download_exception_msg:
        #     logger.error('410 client error, delete... ' + self.url)
        #     news_collection.remove({"id": self.tweet_id})
        #     return
        article.parse()
        # canonical links to social platforms are not news -> drop the record
        ignore_list = ["twitter.com", "youtube.com", "facebook.com", "instagram.com"]
        if any(x in article.canonical_link for x in ignore_list):
            print('delete ' + article.canonical_link)
            news_collection.remove({"id": self.tweet_id})
            return
        print(
            'Title for ' + article.top_image + ' - ' + article.canonical_link + '\n' + article.title + '\n\n')
        print('Latest: ' + str(latest))
        # drop exact duplicates by title or body text
        # NOTE(review): cursor.count() is removed in modern pymongo
        # (use count_documents) — confirm driver version.
        if news_collection.find({'$or': [{'title': article.title}, {'text': article.text}]}).count() > 0:
            print('Duplicate, Ignore!')
            news_collection.remove({"id": self.tweet_id})
            return
        # placeholder value; presumably computed later in the pipeline — verify
        vector = 0
        news_collection.update_one({'id': self.tweet_id}, {'$set': {
            'vector': vector,
            'title': article.title,
            'text': article.text,
            'image': article.top_image}})
    except Exception as e:
        # broad catch: best-effort worker thread — failures are logged only
        logger.error(str(e))
def any(*args):
    """Variadic truth test: True when at least one argument is truthy."""
    for value in args:
        if value:
            return True
    return False
def __contains__(self, path):
    """True when at least one node on *path* is an attribute node."""
    for node in path:
        if isinstance(node, xsc.Attr):
            return True
    return False
# NOTE(review): this fragment almost certainly sits inside a `for line in ...:`
# loop (the commented-out `continue` below only makes sense there); the loop
# header is outside this view.
line = line.lower()
line = line.replace("\"", "")
# if not a restaurant or is closed or not in NY
#if "restaurant" not in line or "is_open:0" in line or "state:ny" not in line:
#    continue
# strip the surrounding braces/brackets of the record
line = line[1:]
line = line[:-1]
info_map = {}
# walk comma-separated key:value pairs, keeping only wanted keys
while (line != ""):
    if line[0] == ',':
        line = line[1:]
    info_split = line[:line.find(',')]
    key_value = info_split.split(':')
    if len(key_value) > 1:
        key = key_value[0]
        # substring match against the wanted-key list — presumably keys are
        # exact names; verify substring matching is intended
        needed = any(key in x for x in keys)
        if needed:
            info_map[key] = key_value[1]
            #print ("KEY: " + key + " VALUE: " + info_map[key])
    line = line[len(info_split) + 1:]
if info_map:  # if it has been populated
    ny_restaurants.append(info_map)
# Populate database with array of dictionaries
# insertion_formula = "INSERT INTO restaurants (business_id, name, address, city, state, postal_code, latitude, longitude,stars, review_count) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
# NOTE(review): insertion_formula is only assigned in the commented-out line
# above — it must be defined elsewhere or execute() raises NameError; verify.
for d in ny_restaurants:
    row = (d[keys[0]], d[keys[1]], d[keys[2]], d[keys[3]], d[keys[4]],
           d[keys[5]], d[keys[6]], d[keys[7]], d[keys[8]], d[keys[9]])
    mycursor.execute(insertion_formula, row)
def any(iterable, pred):
    "Returns True if ANY element in the given iterable is True for the given pred function"
    for element in iterable:
        if pred(element):
            return True
    return False
def any(function_or_iterable, *args):
    """Overloaded any: with one argument, behaves like the builtin over the
    iterable; with a second, applies the first argument as a predicate to
    each element of the second."""
    if args:
        return builtins.any(map(function_or_iterable, args[0]))
    return builtins.any(function_or_iterable)
def __contains__(self, path):
    """True when *path* is contained in at least one of our selectors."""
    for selector in self.selectors:
        if path in selector:
            return True
    return False
def any(iterable, pred):
    """Returns True if ANY element in the given iterable is True for the given pred function"""
    return builtins.any(map(pred, iterable))