def transform(self, table, pivot_column, metric_column, x_axis_column):
    """Return a pivot table based on the pivot column."""
    # NOTE: The split transformation handles a single metric; support for
    # multiple metrics is a future enhancement.
    # Get the index of the pivot column.
    pivot_column_index = table.columns.index(pivot_column)
    # Find the index of the metric column.
    metric_column_index = table.columns.index(metric_column)
    new_split_columns = OrderedSet([])
    # Collect the values that become the new columns.
    for data in table.data:
        new_split_columns.add(data[pivot_column_index])
    new_split_columns = list(new_split_columns)
    # Set the metric for the new columns.
    grouping_column_index = x_axis_column
    new_split_data = OrderedDict()
    new_data = []
    for data in table.data:
        temp_metric = data[metric_column_index]
        temp_pivot_value = data[pivot_column_index]
        temp_grouping_column = data[grouping_column_index]
        if not new_split_data.get(temp_grouping_column):
            new_split_data[temp_grouping_column] = ['-'] * len(new_split_columns)
        new_split_data[temp_grouping_column][
            new_split_columns.index(temp_pivot_value)] = temp_metric
    for key in new_split_data:
        new_data.append([key] + new_split_data[key])
    table.columns = [table.columns[x_axis_column]] + list(new_split_columns)
    table.data = new_data
    return table
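# A minimal usage sketch for the pivot transform above. The Table stand-in,
# column names, and values here are hypothetical; the real transformer
# receives its table object from the surrounding query/report layer.
from types import SimpleNamespace

table = SimpleNamespace(
    columns=['month', 'region', 'sales'],
    data=[
        ['Jan', 'north', 10],
        ['Jan', 'south', 20],
        ['Feb', 'north', 30],
    ],
)
# transform(table, pivot_column='region', metric_column='sales',
#           x_axis_column=0) would reshape this to:
#   columns: ['month', 'north', 'south']
#   data:    [['Jan', 10, 20], ['Feb', 30, '-']]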
def merge(*lists):
    """
    Merge lists while trying to keep the relative order of the elements.
    Warn if the lists have the same elements in a different relative order.

    For static assets it can be important to have them included in the DOM
    in a certain order. In JavaScript you may not be able to reference a
    global or in CSS you might want to override a style.
    """
    dependency_graph = defaultdict(set)
    all_items = OrderedSet()
    for list_ in filter(None, lists):
        head = list_[0]
        # The first items depend on nothing but have to be part of the
        # dependency graph to be included in the result.
        dependency_graph.setdefault(head, set())
        for item in list_:
            all_items.add(item)
            # No self dependencies
            if head != item:
                dependency_graph[item].add(head)
            head = item
    try:
        return stable_topological_sort(all_items, dependency_graph)
    except CyclicDependencyError:
        warnings.warn(
            'Detected duplicate Media files in an opposite order: {}'.format(
                ', '.join(repr(list_) for list_ in lists)),
            MediaOrderConflictWarning,
        )
        return list(all_items)
def merge(*lists):
    """
    Merge lists while trying to keep the relative order of the elements.
    Warn if the lists have the same elements in a different relative order.

    For static assets it can be important to have them included in the DOM
    in a certain order. In JavaScript you may not be able to reference a
    global or in CSS you might want to override a style.
    """
    dependency_graph = defaultdict(set)
    all_items = OrderedSet()
    for list_ in filter(None, lists):
        head = list_[0]
        # The first items depend on nothing but have to be part of the
        # dependency graph to be included in the result.
        dependency_graph.setdefault(head, set())
        for item in list_:
            all_items.add(item)
            # No self dependencies
            if head != item:
                dependency_graph[item].add(head)
            head = item
    try:
        return stable_topological_sort(all_items, dependency_graph)
    except CyclicDependencyError:
        warnings.warn(
            'Detected duplicate Media files in an opposite order: {}'.format(
                ', '.join(repr(l) for l in lists)
            ),
            MediaOrderConflictWarning,
        )
        return list(all_items)
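# A minimal usage sketch for the merge helper above, assuming Django >= 2.2
# where it is exposed as the static method Media.merge(*lists); the asset
# file names are made up.
from django.forms.widgets import Media

combined = Media.merge(
    ['jquery.js', 'widget.js'],
    ['jquery.js', 'chart.js'],
)
# Both lists agree that jquery.js comes first, so the relative order is
# preserved: ['jquery.js', 'widget.js', 'chart.js'].
print(combined)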
def test_len(self):
    s = OrderedSet()
    self.assertEqual(len(s), 0)
    s.add(1)
    s.add(2)
    s.add(2)
    self.assertEqual(len(s), 2)
def navigation_event_ids_by_user(user, start_date=None, end_date=None):
    database = NavigationEventAudit.get_db()

    def _date_key(date):
        return [date.year, date.month, date.day]

    startkey = [user]
    if start_date:
        startkey.extend(_date_key(start_date))
    endkey = [user]
    if end_date:
        end = end_date + timedelta(days=1)
        endkey.extend(_date_key(end))
    else:
        endkey.append({})

    ids = OrderedSet()
    results = database.view(
        'auditcare/urlpath_by_user_date',
        startkey=startkey,
        endkey=endkey,
        reduce=False,
        include_docs=False,
    )
    for row in results:
        ids.add(row['id'])
    return ids
def get_repositories(self, query=None):
    if not query:
        resp = self.get_client().get_repos(self.username)
        return [{
            "identifier": repo["full_name"],
            "name": repo["full_name"]
        } for repo in resp.get("values", [])]

    exact_query = (u'name="%s"' % query).encode("utf-8")
    fuzzy_query = (u'name~"%s"' % query).encode("utf-8")
    exact_search_resp = self.get_client().search_repositories(
        self.username, exact_query)
    fuzzy_search_resp = self.get_client().search_repositories(
        self.username, fuzzy_query)

    result = OrderedSet()

    for j in exact_search_resp.get("values", []):
        result.add(j["full_name"])

    for i in fuzzy_search_resp.get("values", []):
        result.add(i["full_name"])

    return [{
        "identifier": full_name,
        "name": full_name
    } for full_name in result]
def get_repositories(self, query=None):
    username = self.model.metadata.get("uuid", self.username)
    if not query:
        resp = self.get_client().get_repos(username)
        return [{
            "identifier": repo["full_name"],
            "name": repo["full_name"]
        } for repo in resp.get("values", [])]

    exact_query = f'name="{query}"'.encode()
    fuzzy_query = f'name~"{query}"'.encode()
    exact_search_resp = self.get_client().search_repositories(username, exact_query)
    fuzzy_search_resp = self.get_client().search_repositories(username, fuzzy_query)

    result = OrderedSet()

    for j in exact_search_resp.get("values", []):
        result.add(j["full_name"])

    for i in fuzzy_search_resp.get("values", []):
        result.add(i["full_name"])

    return [{
        "identifier": full_name,
        "name": full_name
    } for full_name in result]
def test_remove(self):
    s = OrderedSet()
    self.assertEqual(len(s), 0)
    s.add(1)
    s.add(2)
    s.remove(2)
    self.assertEqual(len(s), 1)
    self.assertNotIn(2, s)
def get_parent_list(self):
    """
    Returns all the ancestors of this model as a list ordered by MRO.
    Useful for determining if something is an ancestor, regardless of lineage.
    """
    result = OrderedSet(self.parents)
    for parent in self.parents:
        for ancestor in parent._meta.get_parent_list():
            result.add(ancestor)
    return list(result)
def get_parent_list(self):
    """
    Return all the ancestors of this model as a list ordered by MRO.
    Useful for determining if something is an ancestor, regardless of lineage.
    """
    result = OrderedSet(self.parents)
    for parent in self.parents:
        for ancestor in parent._meta.get_parent_list():
            result.add(ancestor)
    return list(result)
def _parse_constraint_columns(self, check_clause, columns):
    check_columns = OrderedSet()
    statement = sqlparse.parse(check_clause)[0]
    tokens = (token for token in statement.flatten() if not token.is_whitespace)
    for token in tokens:
        if (
            token.ttype == sqlparse.tokens.Name and
            self.connection.ops.quote_name(token.value) == token.value and
            token.value[1:-1] in columns
        ):
            check_columns.add(token.value[1:-1])
    return check_columns
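# A small sketch of the token scan performed above, assuming the sqlparse
# package is installed; the CHECK clause is made up. Backtick-quoted
# identifiers come back as Name tokens with the quotes still attached, so
# slicing off the first and last character yields the bare column names.
import sqlparse

clause = '`age` >= 18 AND `name` IS NOT NULL'
names = [
    token.value[1:-1]
    for token in sqlparse.parse(clause)[0].flatten()
    if token.ttype == sqlparse.tokens.Name
]
print(names)  # expected: ['age', 'name']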
def get(request):
    keywords = request.GET.get('s', '')
    current_type = request.GET.get('s_type', '')
    if current_type == 'question':
        return_suggest_list = []
        if keywords:
            s = ZhihuQuestion.search()
            s = s.suggest("my_suggest", keywords, completion={
                "field": "suggest",
                "fuzzy": {
                    "fuzziness": 20
                },
                "size": 10
            })
            suggestions = s.execute()
            for match in suggestions.suggest.my_suggest[0].options[:10]:
                source = match._source
                return_suggest_list.append(source["title"])
        return HttpResponse(json.dumps(return_suggest_list),
                            content_type="application/json")
    elif current_type == 'job':
        return_suggest_list = []
        if keywords:
            s = Lagou.search()
            s = s.suggest("my_suggest", keywords, completion={
                "field": "suggest",
                "fuzzy": {
                    "fuzziness": 20
                },
                "size": 10
            })
            suggestions = s.execute()
            name_set = OrderedSet()
            for match in suggestions.suggest.my_suggest[0].options[:10]:
                source = match._source
                name_set.add(source["title"])
            for name in name_set:
                return_suggest_list.append(name)
        return HttpResponse(json.dumps(return_suggest_list),
                            content_type="application/json")
def getCompanyListWithHistoryData(companyData):
    context = {}
    # Filter out all the intermediate reports and calculate the current EPS
    # and PE value based on the new share price.
    companiesList = OrderedSet()
    readHistoryData = False
    for data in companyData:
        companiesList.add(data.company)
        if not readHistoryData:
            # Load the history data for this symbol.
            historyData = getCompanyHistoryDataByCompany(data.company)
            context['historyData'] = historyData
            readHistoryData = True

    # Get the last updated date of the data: select the latest trade
    # summary and use its date.
    latestDetailedTrade = DailyTradeSummary.objects.latest('date')
    context['lastUpdateDate'] = latestDetailedTrade.date
    context['companiesList'] = companiesList
    return context
def getPlaylistUrls(youtubeUrl):
    if 'http' not in youtubeUrl:
        url = 'https://' + youtubeUrl
    else:
        url = youtubeUrl

    sTUBE = ''
    cPL = ''
    urls = OrderedSet()

    if 'list=' in url:
        p = re.compile(r'list=([^&]*)')
        match = p.search(url)
        listStr = match.group()
        eq = listStr.rfind('=') + 1
        cPL = listStr[eq:]
    else:
        print('Incorrect Playlist.')
        exit(1)

    try:
        yTUBE = urllib.request.urlopen(
            'https://www.youtube.com/playlist?list=' + cPL).read()
        sTUBE = str(yTUBE)
    except urllib.error.URLError as e:
        print(e.reason)

    tmp_mat = re.compile(r'watch\?v=\S+?list=' + cPL)
    mat = re.findall(tmp_mat, sTUBE)
    if mat:
        for PL in mat:
            yPL = str(PL)
            # Strip any trailing query parameters so only the watch URL is kept.
            if '&' in yPL:
                yPL = yPL[:yPL.index('&')]
            urls.add('https://www.youtube.com/' + yPL)
    return urls
def media(self):
    unique_css = OrderedSet()
    unique_js = OrderedSet()
    for js in getattr(self.Media, 'js', ()):
        unique_js.add(js)
    for css in getattr(self.Media, 'css', ()):
        unique_css.add(css)
    for module in self.modules:
        for js in getattr(module.Media, 'js', ()):
            unique_js.add(js)
        for css in getattr(module.Media, 'css', ()):
            unique_css.add(css)

    class Media:
        css = list(unique_css)
        js = list(unique_js)

    return Media
def _filter_candidates_by_published_status(candidates):
    from fluentcms_publishing.middleware import is_draft_request_context

    # Filter candidate results by published status, using instance
    # attributes instead of queryset filtering to handle unpublishable and
    # fluentcms publishing-enabled items.
    objs = OrderedSet()  # preserve order & remove dupes
    if is_draft_request_context():
        for candidate in candidates:
            # Keep candidates that are publishable draft copies, or that
            # are not publishable (i.e. they don't have the `is_draft`
            # attribute at all).
            if getattr(candidate, 'is_draft', True):
                objs.add(candidate)
            # Also keep candidates where we have the published copy and can
            # exchange to get the draft copy with an identical URL.
            elif hasattr(candidate, 'get_draft'):
                draft_copy = candidate.get_draft()
                if draft_copy.get_absolute_url() == candidate.get_absolute_url():
                    objs.add(draft_copy)
    else:
        for candidate in candidates:
            # Keep candidates that are published, or that are not
            # publishable (i.e. they don't have the `is_published`
            # attribute).
            if getattr(candidate, 'is_published', True):
                # Skip candidates that are not within any publication date
                # restrictions.
                if (hasattr(candidate, 'is_within_publication_dates') and
                        not candidate.is_within_publication_dates()):
                    pass
                else:
                    objs.add(candidate)
    # Convert `OrderedSet` to a list which supports `len`, see
    # https://code.djangoproject.com/ticket/25093
    return list(objs)
def get_constraints(self, cursor, table_name):
    """
    Retrieve any constraints or keys (unique, pk, fk, check, index) across
    one or more columns.
    """
    constraints = {}
    # Get the actual constraint names and columns
    name_query = """
        SELECT kc.`constraint_name`, kc.`column_name`,
            kc.`referenced_table_name`, kc.`referenced_column_name`
        FROM information_schema.key_column_usage AS kc
        WHERE
            kc.table_schema = DATABASE() AND
            kc.table_name = %s
        ORDER BY kc.`ordinal_position`
    """
    cursor.execute(name_query, [table_name])
    for constraint, column, ref_table, ref_column in cursor.fetchall():
        if constraint not in constraints:
            constraints[constraint] = {
                'columns': OrderedSet(),
                'primary_key': False,
                'unique': False,
                'index': False,
                'check': False,
                'foreign_key': (ref_table, ref_column) if ref_column else None,
            }
        constraints[constraint]['columns'].add(column)
    # Now get the constraint types
    type_query = """
        SELECT c.constraint_name, c.constraint_type
        FROM information_schema.table_constraints AS c
        WHERE
            c.table_schema = DATABASE() AND
            c.table_name = %s
    """
    cursor.execute(type_query, [table_name])
    for constraint, kind in cursor.fetchall():
        if kind.lower() == "primary key":
            constraints[constraint]['primary_key'] = True
            constraints[constraint]['unique'] = True
        elif kind.lower() == "unique":
            constraints[constraint]['unique'] = True
    # Add check constraints.
    if self.connection.features.can_introspect_check_constraints:
        type_query = """
            SELECT c.constraint_name, c.check_clause
            FROM information_schema.check_constraints AS c
            WHERE
                c.constraint_schema = DATABASE() AND
                c.table_name = %s
        """
        cursor.execute(type_query, [table_name])
        for constraint, check_clause in cursor.fetchall():
            # Parse columns.
            columns = OrderedSet()
            for statement in sqlparse.parse(check_clause):
                for token in statement.flatten():
                    if token.ttype in [
                        sqlparse.tokens.Name,
                        sqlparse.tokens.Literal.String.Single,
                    ]:
                        columns.add(token.value[1:-1])
            constraints[constraint] = {
                'columns': columns,
                'primary_key': False,
                'unique': False,
                'index': False,
                'check': True,
                'foreign_key': None,
            }
    # Now add in the indexes
    cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
    for table, non_unique, index, colseq, column, type_ in [
        x[:5] + (x[10],) for x in cursor.fetchall()
    ]:
        if index not in constraints:
            constraints[index] = {
                'columns': OrderedSet(),
                'primary_key': False,
                'unique': False,
                'check': False,
                'foreign_key': None,
            }
        constraints[index]['index'] = True
        constraints[index]['type'] = Index.suffix if type_ == 'BTREE' else type_.lower()
        constraints[index]['columns'].add(column)
    # Convert the sorted sets to lists
    for constraint in constraints.values():
        constraint['columns'] = list(constraint['columns'])
    return constraints
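# For orientation, a hand-written (not real introspection output) example of
# the dict shape returned by get_constraints() for a table with a primary
# key and one hypothetical secondary index:
example_constraints = {
    'PRIMARY': {
        'columns': ['id'], 'primary_key': True, 'unique': True,
        'index': True, 'check': False, 'foreign_key': None, 'type': 'btree',
    },
    'account_name_idx': {
        'columns': ['name'], 'primary_key': False, 'unique': False,
        'index': True, 'check': False, 'foreign_key': None, 'type': 'btree',
    },
}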
def test_discard(self):
    s = OrderedSet()
    self.assertEqual(len(s), 0)
    s.add(1)
    s.discard(2)
    self.assertEqual(len(s), 1)
def headings(self):
    headings = OrderedSet()
    for entry in self.entry_set.all():
        for meta_data in entry.metadata_set.all():
            headings.add(meta_data.key)
    return headings
def test_bool(self):
    # Refs #23664
    s = OrderedSet()
    self.assertFalse(s)
    s.add(1)
    self.assertTrue(s)
def test_contains(self):
    s = OrderedSet()
    self.assertEqual(len(s), 0)
    s.add(1)
    self.assertIn(1, s)
def get(request):
    key_words = request.GET.get('s', '')
    current_type = request.GET.get('s_type', '')
    if current_type == "article":
        return_suggest_list = []
        if key_words:
            s = JobboleBlogIndex.search()
            # Fuzzy search: "fuzziness" is the allowed edit distance and
            # "prefix_length" is the length of the unchanged leading prefix.
            s = s.suggest('my_suggest', key_words, completion={
                "field": "suggest",
                "fuzzy": {
                    "fuzziness": 2
                },
                "size": 10
            })
            suggestions = s.execute()
            for match in suggestions.suggest.my_suggest[0].options[:10]:
                source = match._source
                return_suggest_list.append(source["title"])
        return HttpResponse(json.dumps(return_suggest_list),
                            content_type="application/json")
    elif current_type == "job":
        return_suggest_list = []
        if key_words:
            s = LagouJobIndex.search()
            s = s.suggest('my_suggest', key_words, completion={
                "field": "suggest",
                "fuzzy": {
                    "fuzziness": 2
                },
                "size": 10
            })
            suggestions = s.execute()
            # Deduplicate identical job titles posted by different companies
            # to improve the user experience.
            name_set = OrderedSet()
            for match in suggestions.suggest.my_suggest[0].options[:10]:
                source = match._source
                name_set.add(source["title"])
            for name in name_set:
                return_suggest_list.append(name)
        return HttpResponse(json.dumps(return_suggest_list),
                            content_type="application/json")
    elif current_type == "question":
        return_suggest_list = []
        if key_words:
            s_question = ZhiHuQuestionIndex.search()
            # Fuzzy search: "fuzziness" is the allowed edit distance and
            # "prefix_length" is the length of the unchanged leading prefix.
            s_question = s_question.suggest('my_suggest', key_words, completion={
                "field": "suggest",
                "fuzzy": {
                    "fuzziness": 2
                },
                "size": 10
            })
            suggestions_question = s_question.execute()
            for match in suggestions_question.suggest.my_suggest[0].options[:10]:
                source = match._source
                return_suggest_list.append(source["title"])
        return HttpResponse(json.dumps(return_suggest_list),
                            content_type="application/json")