def contribute_properties(self, parent):
    if not self.props_schemata:
        return
    props = dict((k, v) for k, v in self.props_schemata.items()
                 if v.support_status.status != support.HIDDEN)

    required_props = dict((k, v) for k, v in props.items() if v.required)
    if required_props:
        section = self._section(
            parent, _('Required Properties'), '%s-props-req')
        for prop_key, prop in sorted(required_props.items(),
                                     key=cmp_to_key(self.cmp_prop)):
            self.contribute_property(section, prop_key, prop)

    optional_props = dict((k, v) for k, v in props.items() if not v.required)
    if optional_props:
        section = self._section(
            parent, _('Optional Properties'), '%s-props-opt')
        for prop_key, prop in sorted(optional_props.items(),
                                     key=cmp_to_key(self.cmp_prop)):
            self.contribute_property(section, prop_key, prop)
def getSynonyms(wordPos, symSet1, symSet2, wordPosInterested, sortFunc):
    word, pos = re.split(r"[$]", wordPos)
    result = ''
    if word in symSet1:
        if pos in symSet1[word]:
            cnt = 1
            for idx in symSet1[word][pos]:
                wordPosSet = set([w + '$' + pos for w in symSet1[word][pos][idx]])
                wordPosFirst = wordPosSet & wordPosInterested
                wordPosSecond = wordPosSet - wordPosFirst
                sortSet1 = [re.split(r"[$]", m)[0]
                            for m in sorted(wordPosFirst, key=cmp_to_key(sortFunc))]
                sortSet2 = [re.split(r"[$]", m)[0] for m in sorted(wordPosSecond)]
                result += 'SymSet_1 %d[%s]\n' % (
                    cnt, ", ".join(sortSet1) + ' ●' + ", ".join(sortSet2))
                cnt += 1
    if word in symSet2:
        if pos in symSet2[word]:
            cnt = 1
            for idx in symSet2[word][pos]:
                wordPosSet = set([w + '$' + pos for w in symSet2[word][pos][idx]])
                wordPosFirst = wordPosSet & wordPosInterested
                wordPosSecond = wordPosSet - wordPosFirst
                sortSet1 = [re.split(r"[$]", m)[0]
                            for m in sorted(wordPosFirst, key=cmp_to_key(sortFunc))]
                sortSet2 = [re.split(r"[$]", m)[0] for m in sorted(wordPosSecond)]
                result += 'SymSet_2 %d[%s]\n' % (
                    cnt, ", ".join(sortSet1) + ' ●' + ", ".join(sortSet2))
                cnt += 1
    return result
def convex_hull(self):
    """Return the convex hull of the shape.

    This uses Graham's algorithm [O(V log V)] and caches the result
    inside self._convex_hull.
    """
    verts = self.vertices()
    if len(verts) == 0:
        self._convex_hull = []
    elif self._convex_hull is None:
        p = min(verts, key=cmp_to_key(point_cmp))
        verts.remove(p)
        ps = sorted(verts, key=cmp_to_key(partial(clockwise_and_dist, p)))
        ps.append(p)
        hull = [p, ps[0]]
        i = 1
        while clockwise_from(hull[0], hull[1], ps[i]) >= 0:
            hull.pop()
            hull.append(ps[i])
            i += 1
        hull.append(ps[i])
        for l in ps[i:]:
            while clockwise_from(hull[-2], hull[-1], l) >= 0:
                hull.pop()
            hull.append(l)
        hull.pop()
        self._convex_hull = hull
    return self._convex_hull
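
# The method above assumes helpers such as point_cmp; a minimal standalone
# sketch of the cmp_to_key pattern it relies on. The comparator below is a
# hypothetical stand-in, not the project's actual point_cmp.
from functools import cmp_to_key

def point_cmp_sketch(a, b):
    # Order points by y, then by x, so min() picks the lowest-leftmost pivot.
    if a[1] != b[1]:
        return -1 if a[1] < b[1] else 1
    return (a[0] > b[0]) - (a[0] < b[0])

pts = [(2, 3), (0, 1), (5, 1)]
print(min(pts, key=cmp_to_key(point_cmp_sketch)))  # (0, 1)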
def load_address_and_contact(doc, key=None):
    """Loads address list and contact list in `__onload`"""
    from frappe.contacts.doctype.address.address import get_address_display

    filters = [
        ["Dynamic Link", "link_doctype", "=", doc.doctype],
        ["Dynamic Link", "link_name", "=", doc.name],
        ["Dynamic Link", "parenttype", "=", "Address"],
    ]
    address_list = frappe.get_all("Address", filters=filters, fields=["*"])

    address_list = [a.update({"display": get_address_display(a)})
                    for a in address_list]
    address_list = sorted(
        address_list,
        key=functools.cmp_to_key(
            lambda a, b: (int(a.is_primary_address - b.is_primary_address))
            or (1 if a.modified - b.modified else 0)),
        reverse=True)

    doc.set_onload('addr_list', address_list)

    contact_list = []
    filters = [
        ["Dynamic Link", "link_doctype", "=", doc.doctype],
        ["Dynamic Link", "link_name", "=", doc.name],
        ["Dynamic Link", "parenttype", "=", "Contact"],
    ]
    contact_list = frappe.get_all("Contact", filters=filters, fields=["*"])

    contact_list = sorted(
        contact_list,
        key=functools.cmp_to_key(
            lambda a, b: (int(a.is_primary_contact - b.is_primary_contact))
            or (1 if a.modified - b.modified else 0)),
        reverse=True)

    doc.set_onload('contact_list', contact_list)
def test_sorting_with_cmp():
    @thesefy
    class Person(object):
        pass

    query = these((who, who2) for who in Person for who2 in Person)

    # Sort with tokens + filters
    sorted_parts = list(sorted(query.tokens + query.filters,
                               key=functools.cmp_to_key(translation.cmp)))
    _k = functools.cmp_to_key(translation.cmp)
    for i, part in enumerate(sorted_parts):
        if i < len(sorted_parts) + 1:
            for after in sorted_parts[i+1:]:
                assert _k(part) <= _k(after), (part, after)

    # Now sort with filters + tokens
    sorted_parts = list(sorted(query.filters + query.tokens,
                               key=functools.cmp_to_key(translation.cmp)))
    _k = functools.cmp_to_key(translation.cmp)
    for i, part in enumerate(sorted_parts):
        if i < len(sorted_parts) + 1:
            for after in sorted_parts[i+1:]:
                assert _k(part) <= _k(after), (part, after)
def build(self):
    "Build the list of directories with images"

    # The dict containing all information
    self.db = {
        'paths_list': [],
        'skipped_dir': []
    }

    # get information for each directory
    for path, dirnames, filenames in os.walk(self.basepath, followlinks=True):
        relpath = os.path.relpath(path, self.basepath)

        # sort images and sub-albums by name
        if compat.PY2:
            filenames.sort(cmp=locale.strcoll)
            dirnames.sort(cmp=locale.strcoll)
        else:
            from functools import cmp_to_key
            filenames.sort(key=cmp_to_key(locale.strcoll))
            dirnames.sort(key=cmp_to_key(locale.strcoll))

        self.db['paths_list'].append(relpath)
        self.db[relpath] = {
            'medias': [f for f in filenames
                       if os.path.splitext(f)[1] in
                       (self.img_ext_list + self.vid_ext_list)],
            'subdir': dirnames
        }
        self.db[relpath].update(get_metadata(path))

    path_media = [path for path in self.db['paths_list']
                  if self.db[path]['medias'] and path != '.']
    path_nomedia = [path for path in self.db['paths_list']
                    if not self.db[path]['medias'] and path != '.']

    # dir with images: check the thumbnail, and find it if necessary
    for path in path_media:
        self.check_thumbnail(path)

    # dir without images, start with the deepest ones
    for path in reversed(sorted(path_nomedia, key=lambda x: x.count('/'))):
        for subdir in self.get_subdirs(path):
            # use the thumbnail of their sub-directories
            if self.db[subdir].get('thumbnail', ''):
                self.db[path]['thumbnail'] = join(
                    os.path.relpath(subdir, path),
                    self.db[subdir]['thumbnail'])
                break

        if not self.db[path].get('thumbnail', ''):
            # else remove all info about this directory
            self.logger.info("Directory '%s' is empty", path)
            self.db['skipped_dir'].append(path)
            self.db['paths_list'].remove(path)
            del self.db[path]
            parent = os.path.normpath(join(path, '..'))
            child = os.path.relpath(path, parent)
            self.db[parent]['subdir'].remove(child)
def sorted_str(s):
    # Filter out non-alphabetic characters
    a = list(filter(str.isalpha, s))
    # Sort the characters case-insensitively
    b = sorted(a, key=functools.cmp_to_key(
        lambda x, y: 1 if (x.upper() > y.upper()) else -1))
    a.sort(key=functools.cmp_to_key(
        lambda x, y: 1 if (x.upper() > y.upper()) else -1))
    print("".join(a))
    print("".join(b))
def _sort_buffer(self, buffer, lt):
    if lt in (operator.gt, operator.lt):
        buffer.sort(key=self._key_fn, reverse=self._reverse)
    elif self._key_fn:
        buffer.sort(key=cmp_to_key(
            lambda a, b: (not lt(self._key_fn(a), self._key_fn(b)))
                         - (not lt(self._key_fn(b), self._key_fn(a)))))
    else:
        buffer.sort(key=cmp_to_key(
            lambda a, b: (not lt(a, b)) - (not lt(b, a))))
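
# Standalone illustration of the trick used in _sort_buffer above: a strict
# "less than" predicate becomes a three-way comparator via the expression
# (not lt(a, b)) - (not lt(b, a)), which yields -1, 0 or 1.
from functools import cmp_to_key

def cmp_from_lt(lt):
    return lambda a, b: (not lt(a, b)) - (not lt(b, a))

print(sorted([3, 1, 2], key=cmp_to_key(cmp_from_lt(lambda a, b: a < b))))  # [1, 2, 3]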
def largestNumber(nums):
    from functools import cmp_to_key

    def strCompare(n1, n2):
        # n1, n2: str
        if n1 + n2 > n2 + n1:
            return 1
        elif n1 + n2 < n2 + n1:
            return -1
        else:
            return 0
    #enddef

    def intCompare(n1, n2):
        # n1, n2: int
        # TODO: handle comparisons like [5, 2] vs [5, 2, 5, 2] where the
        # cyclic digit sequences are equal (this loops forever)
        dlist1 = getDigits(n1)
        dlist2 = getDigits(n2)
        if dlist1 == dlist2:
            return 0
        len1 = len(dlist1)
        len2 = len(dlist2)
        i = j = 0
        print(dlist1, dlist2)
        while 1:
            if dlist1[i] < dlist2[j]:
                return -1
            elif dlist1[i] > dlist2[j]:
                return 1
            else:
                i = (i + 1) % len1
                j = (j + 1) % len2
    #enddef

    def getDigits(num):
        if num == 0:
            return [0]
        dlist = []
        digitNum = 0
        tmp = num
        while tmp >= 1:
            tmp /= 10
            digitNum += 1
        for i in range(digitNum):
            digit = int(num / 10**(digitNum - 1)) - int(num / 10**(digitNum))
            dlist.append(digit)
            digitNum -= 1
            num -= digit * 10**digitNum
        return dlist
    #enddef

    nums1 = sorted(nums, key=cmp_to_key(intCompare), reverse=True)
    nums2 = sorted(map(str, nums), key=cmp_to_key(strCompare), reverse=True)
    ret1 = ''.join(map(str, nums1))
    ret2 = ''.join(nums2)
    return (ret1, ret2)
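
# Quick sanity check of the string-concatenation comparator used above
# (standalone sketch; the values are arbitrary test data):
from functools import cmp_to_key

def str_compare(n1, n2):
    # "9" + "34" = "934" > "349" = "34" + "9", so "9" should come first
    return (n1 + n2 > n2 + n1) - (n1 + n2 < n2 + n1)

digits = ["3", "30", "34", "5", "9"]
print(''.join(sorted(digits, key=cmp_to_key(str_compare), reverse=True)))  # 9534330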
def sortedRandint():
    lst = [random.randint(-50, 50) for i in range(10)]
    lst2 = filter(lambda n: n > 0, lst)
    lst3 = map(lambda x: x * 2, lst)
    # If the comparator's result is > 0, x is placed after y.
    # This is the Python 2 style; to use it in Python 3 the old cmp
    # argument must be converted into a key with cmp_to_key.
    c = sorted(lst, key=functools.cmp_to_key(lambda x, y: x - y))
    # Without the lambda, sorted() defaults to ascending order.
    # As above: when the lambda returns 1, x is placed after y.
    lst.sort(key=functools.cmp_to_key(lambda x, y: 1 if x > y else -1))
    return lst3
def test_cmp_to_key(self):
    def cmp1(x, y):
        return (x > y) - (x < y)

    key = functools.cmp_to_key(cmp1)
    self.assertEqual(key(3), key(3))
    self.assertGreater(key(3), key(1))

    def cmp2(x, y):
        return int(x) - int(y)

    key = functools.cmp_to_key(cmp2)
    self.assertEqual(key(4.0), key('4'))
    self.assertLess(key(2), key('35'))
def subtree(self, folder_id, files, root, again=None):
    if again:
        print("dasd")
    else:
        list_id = 0
        nono = []
        table_id = 0
        image_list = wx.ImageList(16, 16)
        folder_icon = image_list.Add(
            wx.Image(resource_path("images/Folder-icon.png"),
                     wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap())
        self.tree_ctrl_1.AssignImageList(image_list)
    table = []
    for folder in files.items():
        if folder[1]['p'] == folder_id and folder[1]['t'] == 1:
            table.append(folder[1]['a']['n'] + folder[1]['h'])
            table_id = table_id + 1
    table.sort(key=functools.cmp_to_key(locale.strcoll))
    print table
    if table_id == 0:
        for folder4 in files.items():
            if folder4[1]['t'] == 0 and folder4[1]['p'] == folder_id:
                xyz = self.tree_ctrl_1.AppendItem(root, folder4[1]['a']['n'])
                self.tree_ctrl_1.SetPyData(xyz, folder4[1]['h'])
                print folder4[1]['a']['n']
    for c in range(0, table_id):
        folder_id_2 = table[c][-8:]
        folder_name_2 = table[c][:-8]
        print folder_name_2
        print folder_id_2
        abc = self.tree_ctrl_1.AppendItem(root, folder_name_2)
        self.tree_ctrl_1.SetPyData(abc, folder_id_2 + " folder")
        self.tree_ctrl_1.SetItemImage(abc, folder_icon, wx.TreeItemIcon_Normal)
        self.subtree(folder_id_2, files, abc, again="yes")
        if c == (table_id - 1):
            table_file = []
            table_file_id = 0
            for folder4 in files.items():
                if folder4[1]['t'] == 0 and folder4[1]['p'] == folder_id:
                    table_file.append(folder4[1]['a']['n'] + folder4[1]['h'])
                    table_file_id = table_file_id + 1
            for y in sorted(table_file, key=functools.cmp_to_key(locale.strcoll)):
                print y
                # file_id_2 = x[-8:]
                # file_name_2 = x[:-8]
                xyz = self.tree_ctrl_1.AppendItem(root, y[:-8])
                self.tree_ctrl_1.SetPyData(xyz, y[-8:] + " file")
def _get_standings(self, on_swiss_over=False):
    # TODO: test
    tmp = [x for x in self.tournament.player_set.all()]
    if on_swiss_over:
        tmp.sort(key=cmp_to_key(Turn._compare2), reverse=True)
    else:
        tmp.sort(key=cmp_to_key(Turn._compare), reverse=True)
    standings = []
    for i in range(len(tmp)):
        tmp[i].standing = i + 1
        tmp[i].save()
        standings.append(tmp[i].gen_standing_dict())
    return standings
def test_sort(self):
    u = self.type2test([1, 0])
    u.sort()
    self.assertEqual(u, [0, 1])

    u = self.type2test([2, 1, 0, -1, -2])
    u.sort()
    self.assertEqual(u, self.type2test([-2, -1, 0, 1, 2]))

    self.assertRaises(TypeError, u.sort, 42, 42)

    def revcmp(a, b):
        if a == b:
            return 0
        elif a < b:
            return 1
        else:  # a > b
            return -1
    u.sort(key=cmp_to_key(revcmp))
    self.assertEqual(u, self.type2test([2, 1, 0, -1, -2]))

    # The following dumps core in unpatched Python 1.5:
    def myComparison(x, y):
        xmod, ymod = x % 3, y % 7
        if xmod == ymod:
            return 0
        elif xmod < ymod:
            return -1
        else:  # xmod > ymod
            return 1
    z = self.type2test(range(12))
    z.sort(key=cmp_to_key(myComparison))

    self.assertRaises(TypeError, z.sort, 2)

    def selfmodifyingComparison(x, y):
        z.append(1)
        if x == y:
            return 0
        elif x < y:
            return -1
        else:  # x > y
            return 1
    self.assertRaises(ValueError, z.sort,
                      key=cmp_to_key(selfmodifyingComparison))

    self.assertRaises(TypeError, z.sort, 42, 42, 42, 42)
def test_reverse_stability(self):
    data = [(random.randrange(100), i) for i in range(200)]
    copy1 = data[:]
    copy2 = data[:]

    def my_cmp(x, y):
        x0, y0 = x[0], y[0]
        return (x0 > y0) - (x0 < y0)

    def my_cmp_reversed(x, y):
        x0, y0 = x[0], y[0]
        return (y0 > x0) - (y0 < x0)

    data.sort(key=cmp_to_key(my_cmp), reverse=True)
    copy1.sort(key=cmp_to_key(my_cmp_reversed))
    self.assertEqual(data, copy1)
    copy2.sort(key=lambda x: x[0], reverse=True)
    self.assertEqual(data, copy2)
def sort(self):
    """ Sort the population """
    if self.sorted:
        return
    rev = (self.minimax == Consts.minimaxType["maximize"])

    if self.sortType == Consts.sortType["raw"]:
        self.internalPop.sort(key=cmp_to_key(Util.cmp_individual_raw),
                              reverse=rev)
    else:
        self.scale()
        self.internalPop.sort(key=cmp_to_key(Util.cmp_individual_scaled),
                              reverse=rev)
        self.internalPopRaw = self.internalPop[:]
        self.internalPopRaw.sort(key=cmp_to_key(Util.cmp_individual_raw),
                                 reverse=rev)

    self.sorted = True
def execute(self, fr):
    fr.check_underflow(2)
    flags = fr.data_pop(int)
    arr = fr.data_pop_list()[:]
    nocase = flags & 1 != 0
    dorev = flags & 2 != 0
    doshuffle = flags & 4 != 0
    if doshuffle:
        for i in range(7):
            random.shuffle(arr)
    elif nocase:
        arr = sorted(arr, key=cmp_to_key(si.sortcompi), reverse=dorev)
    else:
        arr = sorted(arr, key=cmp_to_key(si.sortcomp), reverse=dorev)
    fr.data_push_list(arr)
def sort_func(args):
    """
    Auxiliary function used to sort todos.

    We put the most important items on the bottom of the list because the
    terminal scrolls with the output.

    Items with an immediate due date are considered more important than
    those for which we have more time.
    """
    db, todo = args
    rv = []
    for field in fields:
        field = field.lower()

        neg = field.startswith('-')
        if neg:
            # Remove that '-'
            field = field[1:]

        value = getattr(todo, field)

        if field in ('due', 'created_at', 'completed_at'):
            value = value.timestamp() if value else float('inf')

        if neg:
            # This "negates" the value, whichever type. The lambda is the
            # same as Python 2's `cmp` builtin, but with inverted output
            # (-1 instead of 1 etc).
            value = functools.cmp_to_key(
                lambda a, b: -((a > b) - (a < b)))(value)

        rv.append(value)

    return rv
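
# The lambda above effectively makes cmp_to_key a generic "reverse the order
# of any comparable value" wrapper; a standalone sketch of the same trick:
import functools

def negated(value):
    # Wrap a value so it sorts in descending order, whatever its type.
    return functools.cmp_to_key(lambda a, b: -((a > b) - (a < b)))(value)

print(sorted(["b", "a", "c"], key=negated))  # ['c', 'b', 'a']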
def repr_path(self):
    """
    Construct string representation of the erm, summarizing dependencies
    between tables

    :return: string representation of the erm
    """
    if len(self) == 0:
        return "No relations to show"

    paths = self.longest_paths()

    # turn comparator into Key object for use in sort
    k = cmp_to_key(self.compare_path)
    sorted_paths = sorted(paths, key=k)

    # table name will be padded to match the longest table name
    node_labels = self.node_labels
    n = max([len(x) for x in node_labels.values()]) + 1

    rep = ''
    for path in sorted_paths:
        rep += self.repr_path_with_depth(path, n)
    for node in self.lone_nodes:
        rep += node_labels[node] + '\n'
    return rep
def _get_metacontacts_big_brother(self, family):
    """
    Which of the family will be the big brother under which all the
    others will be grouped?
    """
    family.sort(key=cmp_to_key(self._compare_metacontacts))
    return family[-1]
def largestNumber(self, nums):
    """
    :type nums: List[int]
    :rtype: str
    """
    nums = sorted(map(str, nums), key=cmp_to_key(self.cmp))
    return str(int(''.join(nums)))
def run(self, table):
    """
    :returns: :class:`int`
    """
    column = table.columns[self._column_name]

    if self._comparer:
        if six.PY3:
            data_sorted = sorted(column.values(), key=cmp_to_key(self._comparer))
        else:  # pragma: no cover
            data_sorted = sorted(column.values(), cmp=self._comparer)
    else:
        data_sorted = column.values_sorted()

    if self._reverse:
        data_sorted.reverse()

    ranks = {}
    rank = 0

    for c in data_sorted:
        rank += 1

        if c in ranks:
            continue

        ranks[c] = Decimal(rank)

    new_column = []

    for row in table.rows:
        new_column.append(ranks[row[self._column_name]])

    return new_column
def test_undetected_mutation(self):
    # Python 2.4a1 did not always detect mutation
    memorywaster = []
    for i in range(20):
        def mutating_cmp(x, y):
            L.append(3)
            L.pop()
            return (x > y) - (x < y)
        L = [1, 2]
        self.assertRaises(ValueError, L.sort, key=cmp_to_key(mutating_cmp))
        def mutating_cmp(x, y):
            L.append(3)
            del L[:]
            return (x > y) - (x < y)
        self.assertRaises(ValueError, L.sort, key=cmp_to_key(mutating_cmp))
        memorywaster = [memorywaster]
def _action_sort_key_function(case):
    def _action_cmp(first_action, second_action):
        # if the forms aren't submitted by the same user, just default to
        # server dates
        if first_action.user_id != second_action.user_id:
            return cmp(first_action.server_date, second_action.server_date)
        else:
            form_ids = list(case.xform_ids)

            def _sortkey(action):
                if not action.server_date or not action.date:
                    raise MissingServerDate()

                form_cmp = lambda form_id: (form_ids.index(form_id)
                                            if form_id in form_ids
                                            else sys.maxint, form_id)
                # if the user is the same you should compare with the special logic below
                # if the user is not the same you should compare just using received_on
                return (
                    # this is sneaky - it's designed to use just the date for the
                    # server time in case the phone submits two forms quickly out of order
                    action.server_date.date(),
                    action.date,
                    form_cmp(action.xform_id),
                    _type_sort(action.action_type),
                )

            return cmp(_sortkey(first_action), _sortkey(second_action))

    return cmp_to_key(_action_cmp)
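
# The helper above is Python 2 code (cmp builtin, sys.maxint). Under
# Python 3 the same three-way comparison is usually spelled like this
# (a hedged equivalent, not the project's own code):
import sys

def cmp(x, y):
    return (x > y) - (x < y)

maxint = sys.maxsize  # closest Python 3 analogue of sys.maxint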
def dump(self, filename):
    dplog.debug("Creating ngram file (%s, N=%d)..." % (filename, self.N_max))
    file = open(filename, 'w')
    #file.write( 'ngrams : ' + str(len(self)) + '\n' )
    # len(self) is the total number of ngrams
    file.write(str(len(self)) + '\n')
    #file.write( 'alphabet_size : ' + str(self.alphabet_size) + '\n' )
    pbar = MyProgressBar('Dumping', len(self))
    i = 0
    for gram in sorted(self.keys(), key=cmp_to_key(compare_grams)):
        # NOTE: ord(x)+1 is used to remain compatible with the input format
        file.write("%s : %f\n" % (" ".join(map(lambda x: str(ord(x) + 1), gram)),
                                  self[gram]))
        # file.write( "%s : %f\n" % ((seqToStr(gram, inc=1), self[gram])) )
        i += 1
        pbar.update(i)
    pbar.finish()
    '''
    pbar = MyProgressBar('Dumping', len(self.lines))
    i = 0
    for line in self.lines:
        file.write( "%s\n" % (" ".join(line)) )
        i += 1
        pbar.update(i)
    pbar.finish()
    '''
    file.close()
def check(tag, expected, raw, compare=None):
    global nerrors

    if verbose:
        print("    checking", tag)

    orig = raw[:]   # save input in case of error
    if compare:
        raw.sort(key=cmp_to_key(compare))
    else:
        raw.sort()

    if len(expected) != len(raw):
        print("error in", tag)
        print("length mismatch;", len(expected), len(raw))
        print(expected)
        print(orig)
        print(raw)
        nerrors += 1
        return

    for i, good in enumerate(expected):
        maybe = raw[i]
        if good is not maybe:
            print("error in", tag)
            print("out of order at index", i, good, maybe)
            print(expected)
            print(orig)
            print(raw)
            nerrors += 1
            return
def sort_structure(self, structure):
    """Sort index structure in lower-case, alphabetical order

    The compare arguments are key/value pairs; if an argument is a leaf
    node whose value has a `title` key, compare by that title, otherwise
    compare by the key.
    """
    def _cmp(arg1, arg2):
        arg1 = arg1[1]["title"] if "title" in arg1[1] else arg1[0]
        arg2 = arg2[1]["title"] if "title" in arg2[1] else arg2[0]
        # cmp does not exist in py3
        # via <https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons>
        cmp = lambda x, y: (x > y) - (x < y)
        return cmp(arg1.lower(), arg2.lower())

    if is_py2:
        sorted_opts = {'cmp': _cmp}
    elif is_py3:
        sorted_opts = {'key': cmp_to_key(_cmp)}

    sorted_structure = copy.deepcopy(structure)
    for k, _ in sorted_structure.items():
        sorted_structure = OrderedDict(sorted(
            sorted_structure.items(),
            **sorted_opts
        ))
        if k.endswith(".{0}".format(self.site_config["default_ext"])):
            continue
        sorted_structure[k] = self.sort_structure(sorted_structure[k])
    return sorted_structure
def __init__(self, separator=None, escape=None, endOfLine=None):
    self.sep = None
    self.esc = None
    self.eol = None
    self.inp = None
    self.eolsize = None
    self.buffer = None
    self.pos = None
    self.bufferOffset = None
    self.cachedToken = None
    self.cachedPos = None
    _g = self
    if (separator is not None):
        self.sep = separator
    else:
        self.sep = ","
    if (self.stringLength(self.sep) != 1):
        raise _HxException((("Separator string \"" + HxOverrides.stringOrNull(self.sep)) + "\" not allowed, only single char"))
    if (escape is not None):
        self.esc = escape
    else:
        self.esc = "\""
    if (self.stringLength(self.esc) != 1):
        raise _HxException((("Escape string \"" + HxOverrides.stringOrNull(self.esc)) + "\" not allowed, only single char"))
    if (endOfLine is not None):
        self.eol = endOfLine
    else:
        self.eol = ["\r\n", "\n"]
    if (Lambda.has(self.eol, None) or Lambda.has(self.eol, "")):
        raise _HxException("EOL sequences can't be empty")

    def _hx_local_0(a, b):
        return (_g.stringLength(b) - _g.stringLength(a))
    self.eol.sort(key=python_lib_Functools.cmp_to_key(_hx_local_0))
    self.eolsize = list(map(self.stringLength, self.eol))
    self.open(None, None)
def test_named_consonants(self):
    consonants = [consonant_ka, consonant_nga, consonant_ca,  #consonant_ja,
                  consonant_nya, consonant_tta, consonant_nna, consonant_nnna,
                  consonant_ta, consonant_tha, consonant_na, consonant_pa,
                  consonant_ma, consonant_ya, consonant_ra, consonant_rra,
                  consonant_la, consonant_lla, consonant_llla, consonant_zha,
                  consonant_va]
    # this array has a few duplicates
    if PYTHON3:
        asorted = tamil_sorted(agaram_letters)
        consonants = sorted(list(set(consonants)),
                            key=cmp_to_key(compare_words_lexicographic))
    else:
        asorted = tamil_sorted(agaram_letters)
        consonants = sorted(list(set(consonants)),
                            cmp=compare_words_lexicographic)
    self.assertEqual(asorted, consonants)
def sort_stats(self, *field):
    if not field:
        self.fcn_list = 0
        return self
    if len(field) == 1 and isinstance(field[0], (int, long)):
        # Be compatible with old profiler
        field = [{-1: "stdname",
                  0: "calls",
                  1: "time",
                  2: "cumulative"}[field[0]]]

    sort_arg_defs = self.get_sort_arg_defs()
    sort_tuple = ()
    self.sort_type = ""
    connector = ""
    for word in field:
        sort_tuple = sort_tuple + sort_arg_defs[word][0]
        self.sort_type += connector + sort_arg_defs[word][1]
        connector = ", "

    stats_list = []
    for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
        stats_list.append((cc, nc, tt, ct) + func +
                          (func_std_string(func), func))

    stats_list.sort(key=cmp_to_key(TupleComp(sort_tuple).compare))

    self.fcn_list = fcn_list = []
    for tuple in stats_list:
        fcn_list.append(tuple[-1])
    return self
def problem_sort(self):
    self.problems.sort(key=cmp_to_key(custom_problem_sort))
def cmp_to_key(*args, **kwargs):
    if PYTHON_3:
        return dict(key=functools.cmp_to_key(*args, **kwargs))  #@UndefinedVariable
    else:
        return dict(cmp=args[0])
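
# Usage of the shim above (a sketch): splatting the returned kwargs lets
# one call site work on both Python 2 (cmp=...) and Python 3 (key=...).
items = [3, 1, 2]
items.sort(**cmp_to_key(lambda a, b: a - b))
print(items)  # [1, 2, 3]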
def sorted_cmp(it, cmp):
    return sorted(it, key=cmp_to_key(cmp))
def simulate(server_num, arrival_l, dead_m, sc_r, cores_lambda):
    """
    :param server_num: total number of servers
    :param arrival_l: the arrival times lambda
    :param dead_m: the mean deadline for each task
    :param sc_r: scheduler rate
    :param cores_lambda: 2D array of cores. Each row is the set of all cores of a server
    """
    global arrivals
    global time
    global sum_of_the_length_of_the_sch_q, sum_of_the_length_of_the_sch_q_power_of_2, \
        sum_of_the_length_of_the_server_qs, sum_of_the_length_of_the_server_qs_power_of_2
    global n_task_q, n_task1_q, n_task2_q, sum_of_time_spent_in_the_q, \
        sum_of_time_spent_in_the_q_class1, sum_of_time_spent_in_the_q_class2, \
        sum_of_time_spent_in_the_q_power_of_2, sum_of_time_spent_in_the_q_power_of_2_class1, \
        sum_of_time_spent_in_the_q_power_of_2_class2
    global n_task, n_task1, n_task2, sum_of_time_spent_in_the_system, \
        sum_of_time_spent_in_the_system_class1, sum_of_time_spent_in_the_system_class2, \
        sum_of_time_spent_in_the_system_power_of_2, sum_of_time_spent_in_the_system_power_of_2_class1, \
        sum_of_time_spent_in_the_system_power_of_2_class2

    fel = []  # the future event list
    server_Q = [[] for _ in range(server_num)]  # 2D array. each row is Q for one server
    schdle_Q = []  # Q of the scheduler
    # time of the system
    cores = [[-1 for j in range(len(cores_lambda[i]))]
             for i in range(len(cores_lambda))]
    # makes a 2D array for cores, tasks place in cores, -1 indicates empty state

    # fel.append(Event('END', 50000, ''))
    fel.append(Event('arrival', next_task_time(time, arrival_l), ''))
    fel.append(Event('sc_s', exponentialGenerator(sc_r) + time, ''))  # the time scheduler works
    arrivals = 0

    while (True):
        fel.sort(key=lambda x: x.time)
        e = fel.pop(0)  # pop the first event in the list
        time = e.time
        if (number_of_departed_tasks > 5000):
            sum_of_the_length_of_the_sch_q += len(schdle_Q)
            sum_of_the_length_of_the_sch_q_power_of_2 += len(schdle_Q)**2
            for i in range(len(server_Q)):
                sum_of_the_length_of_the_server_qs[i] += len(server_Q[i])
                sum_of_the_length_of_the_server_qs_power_of_2[i] += len(server_Q[i])**2
        print('........................')
        print(e.kind, e.description)
        print('scheduler_q: ', len(schdle_Q))
        print('serverQ: ', [len(_) for _ in server_Q])
        print('cores', cores)
        print('--------------------------')
        if e.kind == 'arrival':  # a new customer arrives
            task, next_arrival = task_generator(arrival_l, dead_m, time)
            arrivals += 1
            schdle_Q.append(task)  # adding the arrived task to the scheduler Q
            fel.append(Event('arrival', next_arrival, ''))  # generating a future event for the next arrival
            schdle_Q.sort(key=cmp_to_key(task_compare))
            fel.append(Event('deadline', task.deadline, str(id(task))))
            pass
        elif e.kind == 'sc_s':
            fel.append(Event('sc_s', exponentialGenerator(sc_r) + time, ''))
            schedule(schdle_Q, server_Q, cores, cores_lambda, fel, time)  # schedule a task if the Q is not empty
            # scheduler should work here
            pass
        elif e.kind == 'dep':  # a task is done
            departure(e.description, server_Q, cores, cores_lambda, time, fel)
            pass
        elif e.kind == 'deadline':  # a task deadline has come
            deadline_remover(server_Q, schdle_Q, e.description)
            pass
        elif e.kind == 'END':  # this is the end of simulation
            return
        elif number_of_departed_tasks > 50000000:
            return
        if number_of_departed_tasks > 5000:
            a1 = accuracy_check(sum_of_time_spent_in_the_system,
                                sum_of_time_spent_in_the_system_power_of_2, n_task)
            a2 = accuracy_check(sum_of_time_spent_in_the_system_class1,
                                sum_of_time_spent_in_the_system_power_of_2_class1, n_task1)
            a3 = accuracy_check(sum_of_time_spent_in_the_system_class2,
                                sum_of_time_spent_in_the_system_power_of_2_class2, n_task2)
            a4 = accuracy_check(sum_of_time_spent_in_the_q,
                                sum_of_time_spent_in_the_q_power_of_2, n_task_q)
            a5 = accuracy_check(sum_of_time_spent_in_the_q_class1,
                                sum_of_time_spent_in_the_q_power_of_2_class1, n_task1_q)
            a6 = accuracy_check(sum_of_time_spent_in_the_q_class2,
                                sum_of_time_spent_in_the_q_power_of_2_class2, n_task2_q)
            a7 = accuracy_check(sum_of_the_length_of_the_sch_q,
                                sum_of_the_length_of_the_sch_q_power_of_2, time)
            if a1 == True or a2 == True or a3 == True or a4 == True \
                    or a5 == True or a6 == True or a7 == True:
                return
            for i in range(len(sum_of_the_length_of_the_server_qs)):
                a = accuracy_check(sum_of_the_length_of_the_server_qs[i],
                                   sum_of_the_length_of_the_server_qs_power_of_2[i], time)
                if a == True:
                    return
def sort(self, items, query):
    return sorted(items, key=functools.cmp_to_key(self.cmp_source_health))
_INPUT = """\
3
1 1
2 1
1 2
"""
sys.stdin = io.StringIO(_INPUT)

from functools import cmp_to_key

N = int(input())
xy = [list(map(int, input().split())) for _ in range(N)]

def cmp(a, b):
    if a[1] * (b[0] - 1) == b[1] * (a[0] - 1):
        return 0
    return -1 if a[1] * (b[0] - 1) < b[1] * (a[0] - 1) else 1

xy = sorted(xy, key=cmp_to_key(cmp))
ans = 0
now = [10**10, 0]
for i, [x, y] in enumerate(xy):
    if now[1] * x <= now[0] * (y - 1):
        ans += 1
        now = [x - 1, y]
print(ans)
from collections import defaultdict
from functools import cmp_to_key

from .basic import Basic
from .compatibility import reduce, is_sequence
from .parameters import global_parameters
from .logic import _fuzzy_group, fuzzy_or, fuzzy_not
from .singleton import S
from .operations import AssocOp
from .cache import cacheit
from .numbers import ilcm, igcd
from .expr import Expr

# Key for sorting commutative args in canonical order
_args_sortkey = cmp_to_key(Basic.compare)


def _addsort(args):
    # in-place sorting of args
    args.sort(key=_args_sortkey)


def _unevaluated_Add(*args):
    """Return a well-formed unevaluated Add: Numbers are collected and
    put in slot 0 and args are sorted. Use this when args have changed
    but you still want to return an unevaluated Add.

    Examples
    ========
    parents_b = [r['commit'] for r in
                 review_b['revisions'][current_b]['commit']['parents']]

    if current_a in parents_b:
        return -1
    elif current_b in parents_a:
        return 1
    else:
        return cmp(review_a['number'], review_b['number'])

if args.topic:
    reviews = fetch_query(args.gerrit, 'topic:{0}'.format(args.topic))
    change_numbers = [str(r['number'])
                      for r in sorted(reviews, key=cmp_to_key(cmp_reviews))]

if args.query:
    reviews = fetch_query(args.gerrit, args.query)
    change_numbers = [str(r['number'])
                      for r in sorted(reviews, key=cmp_to_key(cmp_reviews))]

if args.change_number:
    change_url_re = re.compile('https?://.+?/([0-9]+(?:/[0-9]+)?)/?')
    for c in args.change_number:
        change_number = change_url_re.findall(c)
        if change_number:
            change_numbers.extend(change_number)
        elif '-' in c:
            templist = c.split('-')
def plan(data: dict):
    load_to_produce = data['load']
    fuels = data['fuels']
    plants = [PowerPlant.from_json(plant) for plant in data['powerplants']]

    # Step One : Sort plants by cost per MWh
    plants = sorted(plants,
                    key=cmp_to_key(lambda a, b: compare_plants(a, b, fuels)))

    # Output containing adjusted loads by plant name
    load_distribution = dict()
    # List of retained plants
    loaded_plants = list()
    # Load to adjust to take into account plant starting power
    plants_to_recover = 0
    load_to_recover = 0

    # Step Two : Compute plant loads over global demand
    for plant in plants:
        if load_to_produce > 0:
            # Compute plant capacity and compare with demand
            plant_capacity = min(load_to_produce, plant.get_capacity(fuels))
            # If minimal power is sufficient, the load is computed over global demand
            if plant.minimal_power <= load_to_produce:
                # Only plants having a real capacity are retained,
                # for example wind turbines without wind are excluded
                if plant_capacity > 0:
                    loaded_plants.append(plant)
                    load_to_produce -= plant_capacity
                    load_distribution[plant.name] = plant_capacity
                    plants_to_recover += 1
                else:
                    # Other plants are excluded from power generation
                    load_distribution[plant.name] = 0
            else:
                # When minimal power is an issue, the plant is added with minimal capacity
                # The exceeding load will be recovered in the next step
                if plants_to_recover > 0:
                    loaded_plants.append(plant)
                    load_to_recover = plant.minimal_power - load_to_produce
                    load_to_produce = 0
                    load_distribution[plant.name] = plant.minimal_power
                else:
                    load_distribution[plant.name] = 0
        else:
            # Total demand was already covered
            load_distribution[plant.name] = 0

    # If there is a load to recover, the distribution must be adjusted
    # regarding plant starting power
    if load_to_recover > 0:
        loaded_plants.reverse()

        # Step Three : (optional) Recover exceeding power generation
        for plant in loaded_plants:
            if load_to_recover > 0:
                current_load = load_distribution[plant.name]
                # Current load can only be adjusted when minimal power is exceeded
                if current_load > plant.minimal_power:
                    plant_capacity = max(
                        plant.minimal_power,
                        load_distribution[plant.name] - load_to_recover)
                    load_distribution[plant.name] = plant_capacity
                    load_to_recover -= plant_capacity

    if load_to_produce > 0 or load_to_recover > 0:
        abort(400)

    return json.dumps([{'name': key, 'p': value}
                       for key, value in load_distribution.items()], indent=1)
def get(self):
    for slug, data in self.data.items():
        sorted_versions = sorted(data["versions"], key=cmp_to_key(SemverCmp))
        self.data[slug]["latest_version"] = sorted_versions[-1]
    return self.data
def __call__(self):
    # Output info header and includes
    self.h("""\
/*
 * This file contains D-Bus client proxy classes generated by qt-client-gen.py.
 *
 * This file can be distributed under the same terms as the specification from
 * which it was generated.
 */
""")

    if self.must_define:
        self.h('\n')
        self.h('#ifndef %s\n' % self.must_define)
        self.h('#error %s\n' % self.must_define)
        self.h('#endif\n')
    self.h('\n')

    if self.extraincludes:
        for include in self.extraincludes.split(','):
            self.h('#include %s\n' % include)

    self.h("""
#include <QtGlobal>
#include <QString>
#include <QObject>
#include <QVariant>
#include <QDBusPendingReply>
#include <TelepathyQt/AbstractInterface>
#include <TelepathyQt/DBusProxy>
#include <TelepathyQt/Global>

namespace Tp
{
class PendingVariant;
class PendingOperation;
}
""")

    if self.must_define:
        self.b("""#define %s\n""" % (self.must_define))
    self.b("""#include "%s"
""" % self.realinclude)

    # Begin namespace
    for ns in self.namespace.split('::'):
        self.hb("""\
namespace %s
{
""" % ns)

    # Output interface proxies
    def ifacenodecmp(x, y):
        xname, yname = [
            self.namespace + '::' +
            node.getAttribute('name').replace('/', '').replace('_', '') +
            'Interface' for node in (x, y)]

        if xname == self.mainiface:
            return -1
        elif yname == self.mainiface:
            return 1
        else:
            return cmp(xname, yname)

    self.ifacenodes = sorted(self.ifacenodes,
                             key=functools.cmp_to_key(ifacenodecmp))
    for ifacenode in self.ifacenodes:
        self.do_ifacenode(ifacenode)

    # End namespace
    self.hb(''.join(['}\n' for ns in self.namespace.split('::')]))

    # Add metatype declaration - otherwise QTBUG #2151 might be triggered
    for ifacenode in self.ifacenodes:
        classname = ifacenode.getAttribute('name').replace(
            '/', '').replace('_', '') + 'Interface'
        self.h("Q_DECLARE_METATYPE(" + self.namespace + "::" + classname + "*)\n")

    # Write output to files
    (codecs.getwriter('utf-8')(open(self.headerfile, 'wb'))).write(''.join(self.hs))
    (codecs.getwriter('utf-8')(open(self.implfile, 'wb'))).write(''.join(self.bs))
import csv
import functools
import locale
import time


def word_to_value(string):
    val = 0
    for char in string:
        val += ord(char) - 96
    return val

t = time.time()

with open('p022_names.txt', 'r') as names:
    names_string = names.read().lower()

names_string = sorted(names_string.replace('"', '').split(","),
                      key=functools.cmp_to_key(locale.strcoll))
print names_string[0]

total_val = 0
for i in names_string:
    total_val += word_to_value(i) * (names_string.index(i) + 1)

print total_val
print time.time() - t
    if PY3:
        x['DictItemsType'] = _dict.items()  # 2.7
        x['DictKeysType'] = _dict.keys()  # 2.7
        x['DictValuesType'] = _dict.values()  # 2.7
    else:
        x['DictItemsType'] = _dict.viewitems()  # 2.7
        x['DictKeysType'] = _dict.viewkeys()  # 2.7
        x['DictValuesType'] = _dict.viewvalues()  # 2.7
    # generic operating system services (CH 15)
    x['RawTextHelpFormatterType'] = argparse.RawTextHelpFormatter('PROG')
    x['RawDescriptionHelpFormatterType'] = argparse.RawDescriptionHelpFormatter('PROG')
    x['ArgDefaultsHelpFormatterType'] = argparse.ArgumentDefaultsHelpFormatter('PROG')
except NameError:
    pass
try:  # python 2.7 (and not 3.1)
    x['CmpKeyType'] = _cmpkey = functools.cmp_to_key(_methodwrap)  # 2.7, >=3.2
    x['CmpKeyObjType'] = _cmpkey('0')  # 2.7, >=3.2
except AttributeError:
    pass
if PY3:  # oddities: removed, etc
    x['BufferType'] = x['MemoryType']
else:
    x['BufferType'] = buffer('')

# -- cleanup ----------------------------------------------------------------
a.update(d)  # registered also succeed
if sys.platform[:3] == 'win':
    os.close(_filedescrip)  # required on win32
os.remove(_tempfile)
def _sortTasks(self, tasks):
    tasks.sort(key=cmp_to_key(todolib.compareTasks))
def get_bom_items(bom, company, qty=1, fetch_exploded=1):
    items = get_bom_items_as_dict(bom, company, qty, fetch_exploded).values()
    items = list(items)
    items.sort(key=functools.cmp_to_key(
        lambda a, b: a.item_code > b.item_code and 1 or -1))
    return items
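
# The `cond and 1 or -1` idiom above predates conditional expressions; a
# standalone sketch of the modern spelling with plain dicts (hypothetical
# data, not erpnext objects):
import functools

rows = [{'item_code': 'B'}, {'item_code': 'A'}]
rows.sort(key=functools.cmp_to_key(
    lambda a, b: 1 if a['item_code'] > b['item_code'] else -1))
print([r['item_code'] for r in rows])  # ['A', 'B']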
def _plot_public_data_statistics(self, all_data, version_attr_name,
                                 title_name, label_cb, is_flight_hours=True):
    """
    generic method to plot flight hours for one data type
    :param all_data: list with all types as string
    :param version_attr_name: attribute name of _VersionData
    :param title_name: name of the data for the title (and hover tool)
    :param label_cb: callback to create the label
    :param is_flight_hours: if True, this shows the flight hours, nr of flights otherwise
    :return: bokeh plot
    """

    if is_flight_hours:
        title_prefix = 'Flight hours'
    else:
        title_prefix = 'Number of Flights'

    # change data structure
    data_hours = {}  # key=data id, value=list of hours for each version
    for d in all_data:
        data_hours[d] = []

    versions = []  # sorted list of all versions
    for ver in sorted(self._version_data,
                      key=functools.cmp_to_key(_Log.compare_version)):
        versions.append(ver)

        # all data points of the requested type for this version
        version_type_data = getattr(self._version_data[ver], version_attr_name)

        for d in all_data:
            if not d in version_type_data:
                version_type_data[d] = 0.
            data_hours[d].append(version_type_data[d])

    # cumulative over each version
    for key in all_data:
        data_hours[key] = np.array(data_hours[key])
        data_hours[key+"_cum"] = np.cumsum(data_hours[key])

    # create a 2D numpy array. We could directly pass the dict to the bokeh
    # plot, but then we don't have control over the sorting order
    X = np.zeros((len(all_data), len(versions)))
    i = 0
    all_data_sorted = []
    for key in sorted(all_data, key=lambda data_key: data_hours[data_key+"_cum"][-1]):
        X[i, :] = data_hours[key+"_cum"]
        all_data_sorted.append(key)
        i += 1
    all_data = all_data_sorted

    colors = viridis(len(all_data))

    area = figure(title=title_prefix+" per "+title_name, tools=TOOLS,
                  active_scroll=ACTIVE_SCROLL_TOOLS,
                  x_axis_label='version (including development states)',
                  y_axis_label='')

    # stack the data: we'll need it for the hover tool & the patches
    last = np.zeros(len(versions))
    stacked_patches = []  # polygon y positions: one per data item
    for i in range(len(all_data)):
        next_data = last + X[i, :]
        # for the stacked patches, we store a polygon: left-to-right, then right-to-left
        stacked_patches.append(np.hstack((last[::-1], next_data)))
        data_hours[all_data[i]+'_stacked'] = next_data
        last = next_data

    data_hours['x'] = np.arange(len(versions))

    # group minor versions closer together by manipulating the x-position
    # (we could use the release dates but we don't have that information for
    # all versions)
    grouping_factor = 3  # higher=stronger grouping, 0=disabled
    versions_spaced = []
    prev_version = versions[0]
    for i in range(len(versions)):
        version = versions[i]
        if prev_version.split('.')[0:2] == version.split('.')[0:2]:
            version_display = 'x.'+version.split('.')[2]
        else:
            versions_spaced.extend(['']*grouping_factor)
            version_display = version
        data_hours['x'][i] = len(versions_spaced)
        versions_spaced.append(version_display)
        prev_version = version

    # hover tool
    if is_flight_hours:
        str_format = '{0,0.0}'
    else:
        str_format = '{0,0}'
    source = ColumnDataSource(data=data_hours)
    for d in all_data:
        renderer = area.circle(x='x', y=d+'_stacked', source=source,
                               size=10, alpha=0, name=d)
        g1_hover = HoverTool(
            renderers=[renderer],
            tooltips=[(title_name, label_cb(d, True)),
                      (title_prefix+' (only this version)', '@'+d+str_format),
                      (title_prefix+' (up to this version)', '@'+d+'_cum'+str_format)])
        area.add_tools(g1_hover)

    # now plot the patches (polygons)
    x = data_hours['x']
    x2 = np.hstack((x[::-1], x))
    for i in range(len(all_data)):
        area.patch(x2, stacked_patches[i], color=colors[i],
                   legend=label_cb(all_data[i], False), alpha=0.8,
                   line_color=None)

    if area.legend:
        area.legend[0].items.reverse()

    area.xaxis.formatter = FuncTickFormatter(code="""
        var versions = """ + str(versions_spaced) + """;
        return versions[Math.floor(tick)]
        """)
    area.xaxis.ticker = FixedTicker(ticks=list(data_hours['x']))

    # decrease size a bit to fit all items
    area.legend.label_text_font_size = '8pt'
    area.legend.label_height = 8
    area.legend.glyph_height = 10

    self._setup_plot(area)
    return area
def search_and_sign_unsinged(self):
    """Search through the last 'count' releases for assets without .asc
    counterparts, or releases without SHA256SUMS.txt.asc
    """
    print('Sign releases on repo: %s' % self.repo)
    print('  With key: %s, %s\n' % (self.keyid, self.uid))
    releases = get_releases(self.repo)

    if self.tag_name:
        releases = [r for r in releases
                    if r.get('tag_name', None) == self.tag_name]
        if len(releases) == 0:
            print('No release with tag "%s" found, exit' % self.tag_name)
            sys.exit(1)
    elif not self.sign_drafts:
        releases = [r for r in releases if not r.get('draft', False)]

    # cycle through releases sorted by publication date
    releases.sort(key=cmp_to_key(compare_published_times))
    for r in releases[:self.count]:
        tag_name = r.get('tag_name', 'No tag_name')
        is_draft = r.get('draft', False)
        is_prerelease = r.get('prerelease', False)
        created_at = r.get('created_at', '')

        msg = 'Found %s%s tagged: %s, created at: %s' % (
            'draft ' if is_draft else '',
            'prerelease' if is_prerelease else 'release',
            tag_name,
            created_at
        )
        if not is_draft:
            msg += ', published at: %s' % r.get('published_at', '')
        print(msg)

        asset_names = [a['name'] for a in r['assets']]
        if not asset_names:
            print('  No assets found, skip release\n')
            continue

        asc_names = [a for a in asset_names if a.endswith('.asc')]
        other_names = [a for a in asset_names if not a.endswith('.asc')]
        need_to_sign = False

        if asset_names and not asc_names:
            need_to_sign = True

        if not need_to_sign:
            for name in other_names:
                if not '%s.asc' % name in asc_names:
                    need_to_sign = True
                    break

        if not need_to_sign:
            need_to_sign = '%s.asc' % SHA_FNAME not in asc_names

        if need_to_sign or self.force:
            self.sign_release(r, other_names, asc_names, r == releases[0])
        else:
            print('  Seems already signed, skip release\n')
from functools import cmp_to_key


class Player:
    def __init__(self, name, score):
        self.name = name
        self.score = score

    def __repr__(self):
        return "{0}, {1}".format(self.name, self.score)

    @staticmethod
    def comparator(a, b):
        if a.score > b.score:
            return -1
        elif a.score < b.score:
            return 1
        else:
            if a.name > b.name:
                return 1
            elif a.name < b.name:
                return -1
            else:
                return 0

n = int(input())
data = []
for i in range(n):
    name, score = input().split()
    score = int(score)
    player = Player(name, score)
    data.append(player)

data = sorted(data, key=cmp_to_key(Player.comparator))
for i in data:
    print(i.name, i.score)
        # Add the subresult to result once we have traversed all the elements
        # in the array
        return

    subRes.append(arr[n])
    sol(arr, n + 1, N, res, subRes)
    subRes.pop()
    sol(arr, n + 1, N, res, subRes)


def comp(a, b):
    """ Custom comparator """
    for i in range(min(len(a), len(b))):
        if a[i] < b[i]:
            return -1
        elif a[i] > b[i]:
            return 1
    return -1  # This to handle empty element


res = []
arr = [8, 1, 8, 6, 8]
arr.sort()
sol(arr, 0, len(arr), res)
#print(res)
print(sorted(res, key=cmp_to_key(comp)))
#()(1)(1 2)(1 2 3)(1 2 3 3)(1 3)(1 3 3)(2)(2 3)(2 3 3)(3)(3 3)
#()(1)(1 6)(1 6 8)(1 6 8 8)(1 6 8 8 8)(1 8)(1 8 8)(1 8 8 8)(6)(6 8)(6 8 8)(6 8 8 8)(8)(8 8)(8 8 8)
def __init__(self, plot_config, verbose_output=False):
    self._config = plot_config
    self._verbose_output = verbose_output

    # lists of dates when a _log was uploaded, one list per type
    self._public_logs_dates = []
    self._private_logs_dates = []
    self._ci_logs_dates = []
    self._all_logs_dates = []

    self._public_logs = []

    # read from the DB
    con = sqlite3.connect(get_db_filename(),
                          detect_types=sqlite3.PARSE_DECLTYPES)
    with con:
        cur = con.cursor()

        cur.execute('select Id, Date, Source, Public, Rating from Logs')
        db_tuples = cur.fetchall()
        for db_tuple in db_tuples:
            log = _Log(db_tuple)

            self._all_logs_dates.append(log.date)
            if log.is_public == 1:
                if log.source == 'CI':
                    self._ci_logs_dates.append(log.date)
                else:
                    self._public_logs_dates.append(log.date)
            else:
                if log.source == 'CI':
                    self._ci_logs_dates.append(log.date)
                else:
                    self._private_logs_dates.append(log.date)

            # LogsGenerated: public only
            if log.is_public != 1 or log.source == 'CI':
                continue

            cur.execute('select * from LogsGenerated where Id = ?',
                        [log.log_id])
            db_tuple = cur.fetchone()

            if db_tuple is None:
                print("Error: no generated data")
                continue

            log.set_generated(db_tuple)

            # filter bogus entries
            if log.sw_version == 'v0.0.0':
                if self._verbose_output:
                    print('Warning: %s with version=v0.0.0' % log.log_id)
                continue
            if log.duration > 7*24*3600:  # probably bogus timestamp(s)
                if self._verbose_output:
                    print('Warning: %s with very high duration %i' %
                          (log.log_id, log.duration))
                continue
            if log.sw_version == '':
                # FIXME: does that still occur and if so why?
                if self._verbose_output:
                    print('Warning: %s version not set' % log.log_id)
                continue

            if log.autostart_id == 0:
                print('Warning: %s with autostart_id=0' % log.log_id)
                continue

            try:
                if int(log.sw_version[1:].split('.')[0]) > 10:
                    print('Warning: %s with large version %s' %
                          (log.log_id, log.sw_version))
                    continue
            except:
                continue

            self._public_logs.append(log)

    self._version_data = {}  # dict of _VersionData items
    self._all_airframes = set()
    self._all_boards = set()
    self._all_ratings = set()
    self._all_flight_modes = set()
    self._total_duration = 0  # in hours, public logs only
    self._total_last_version_duration = 0  # in hours, public logs only
    self._latest_major_release = ""

    for log in self._public_logs:
        if not log.sw_version in self._version_data:
            self._version_data[log.sw_version] = _VersionData()

        self._all_airframes.add(str(log.autostart_id))
        self._all_boards.add(log.hardware)
        self._all_ratings.add(log.rating)

        cur_version_data = self._version_data[log.sw_version]
        boards = cur_version_data.boards
        boards_num_logs = cur_version_data.boards_num_logs
        airframes = cur_version_data.airframes
        airframes_num_logs = cur_version_data.airframes_num_logs
        ratings = cur_version_data.ratings
        flight_modes = cur_version_data.flight_mode_durations

        if not log.hardware in boards:
            boards[log.hardware] = 0
            boards_num_logs[log.hardware] = 0
        boards[log.hardware] += log.duration / 3600.
        boards_num_logs[log.hardware] += 1

        for flight_mode, duration in log.flight_mode_durations:
            flight_mode_str = str(flight_mode)
            self._all_flight_modes.add(flight_mode_str)
            if not flight_mode_str in flight_modes:
                flight_modes[flight_mode_str] = 0.
            flight_modes[flight_mode_str] += duration / 3600.

        autostart_str = str(log.autostart_id)
        if not autostart_str in airframes:
            airframes[autostart_str] = 0
            airframes_num_logs[autostart_str] = 0
        airframes[autostart_str] += log.duration / 3600.
        airframes_num_logs[autostart_str] += 1

        if not log.rating in ratings:
            ratings[log.rating] = 0
        ratings[log.rating] += 1

        self._total_duration += log.duration / 3600.

    if len(self._version_data) > 0:
        latest_version = sorted(
            self._version_data,
            key=functools.cmp_to_key(_Log.compare_version))[-1]
        latest_major_version = latest_version.split('.')[0:2]
        self._latest_major_release = '.'.join(latest_major_version)
        for log in self._public_logs:
            if log.sw_version.split('.')[0:2] == latest_major_version:
                self._total_last_version_duration += log.duration / 3600.
import sys
import functools


def compare(a, b):
    a = str(a)
    b = str(b)
    return int(a + b) - int(b + a)


if __name__ == '__main__':
    t = int(sys.stdin.readline())
    for _ in range(t):
        n = int(sys.stdin.readline())
        ar = list(map(int, sys.stdin.readline().split()))
        ar = sorted(ar, key=functools.cmp_to_key(compare))
        ar = ar[::-1]
        print(''.join(str(k) for k in ar))
        # print(ar)
def build_all_scripts():
    mod_structure.cleanup_build_target("script_source")
    mod_structure.cleanup_build_target("script_library")
    overall_result = 0

    from functools import cmp_to_key

    def libraries_first(a, b):
        la = a["type"] == "library"
        lb = b["type"] == "library"
        if la == lb:
            return 0
        elif la:
            return -1
        else:
            return 1

    sources = make_config.get_project_value("sources", fallback=[])
    sources = sorted(sources, key=cmp_to_key(libraries_first))

    for item in sources:
        _source = item["source"]
        _target = item["target"] if "target" in item else None
        _type = item["type"]
        _language = item["language"]

        if _type not in ("main", "launcher", "library", "preloader"):
            print(f"skipped invalid source with type {_type}")
            overall_result = 1
            continue

        for source_path in make_config.get_project_paths(_source):
            if not exists(source_path):
                print(f"skipped non-existing source path {_source}")
                overall_result = 1
                continue

            target_type = "script_library" if _type == "library" else "script_source"
            target_path = _target if _target is not None \
                else f"{splitext(basename(source_path))[0]}.js"

            # translate make.json source type to build.config source type
            declare = {
                "sourceType": {
                    "main": "mod",
                    "launcher": "launcher",
                    "preloader": "preloader",
                    "library": "library"
                }[_type]
            }
            if "api" in item:
                declare["api"] = item["api"]

            try:
                dot_index = target_path.rindex(".")
                target_path = target_path[:dot_index] + "{}" + target_path[dot_index:]
            except ValueError:
                target_path += "{}"

            destination_path = mod_structure.new_build_target(
                target_type,
                target_path,
                source_type=_type,
                declare=declare)

            mod_structure.update_build_config_list("compile")
            if (isfile(source_path)):
                copy_file(source_path, destination_path)
            else:
                overall_result += build_source(source_path, destination_path)

    return overall_result
from collections import Counter
import functools


def cmp(x, y):
    if x[1] == y[1]:
        return -1 if x[0] < y[0] else x[0] > y[0]
    return -1 if x[1] > y[1] else x[1] < y[1]


l = Counter(filter(str.isalpha, input().lower())).most_common()
l.sort(key=functools.cmp_to_key(cmp))
for i in l:
    print('{} {}'.format(i[0], i[1]))
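
# Example run of the frequency sort above with a fixed string instead of
# input() (standalone sketch): most frequent letters first, ties broken
# alphabetically.
from collections import Counter
import functools

def cmp_pairs(x, y):
    if x[1] == y[1]:
        return -1 if x[0] < y[0] else 1
    return -1 if x[1] > y[1] else 1

pairs = Counter(filter(str.isalpha, "hello world")).most_common()
pairs.sort(key=functools.cmp_to_key(cmp_pairs))
print(pairs)  # [('l', 3), ('o', 2), ('d', 1), ('e', 1), ('h', 1), ('r', 1), ('w', 1)]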
def PrintMinNumber(self, numbers):
    if not numbers:
        return ''
    nums = [str(num) for num in numbers]
    nums.sort(key=cmp_to_key(self._cmp))
    return ''.join(nums)
def largest_number(arr):
    return "".join(sorted(arr, key=functools.cmp_to_key(compare), reverse=True))
def sorting(options, data):
    # function for choosing the right comparer
    def choose_compare(by_name, natural, by_size, by_date):
        def cmp_name(a, b):
            ta = a[1]
            tb = b[1]
            if natural:
                ta = undot(ta)
                tb = undot(tb)
            if ta < tb:
                return -1
            elif ta == tb:
                return 0
            else:
                return 1

        def cmp_size(a, b):
            ka = int(a[0])
            kb = int(b[0])
            return ka - kb

        def cmp_date(a, b):
            global date_kb
            na = a[1]
            nb = b[1]
            da = date_kb.get(na, None)
            db = date_kb.get(nb, None)

            def get_mtime(path):
                s = os.stat(path)
                return s.st_mtime

            if da is None:
                da = get_mtime(na)
                date_kb[na] = da
            if db is None:
                db = get_mtime(nb)
                date_kb[nb] = db
            if da < db:
                return -1
            elif da > db:
                return 1
            else:
                return 0

        if natural:
            by_name = True

        functions = []
        if by_name:
            functions.append(cmp_name)
        if by_size:
            functions.append(cmp_size)
        if by_date:
            functions.append(cmp_date)

        if len(functions) == 0:
            # No sorting
            return None

        # One or more criteria have been chosen
        def cmp_combined(a, b):
            """ sort by applying all selected criteria """
            r = 0
            for f in functions:
                r = f(a, b)
                if r != 0:
                    return r
            return r
        return cmp_combined

    # Choose comparer and apply it, if found
    cmp_func = choose_compare(options['sort_name'],
                              options['sort_name_natural'],
                              options['sort_size'],
                              options['sort_date'])
    if cmp_func is None:
        # No sorting
        return data

    # do sorting
    return sorted(data, key=cmp_to_key(cmp_func))
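
# The cmp_combined closure above chains comparators, falling through to the
# next criterion on ties; a minimal standalone sketch of the same pattern:
from functools import cmp_to_key

def chain(*cmps):
    def combined(a, b):
        for c in cmps:
            r = c(a, b)
            if r != 0:
                return r
        return 0
    return combined

by_len = lambda a, b: len(a) - len(b)
by_alpha = lambda a, b: (a > b) - (a < b)
print(sorted(["bb", "a", "ab"], key=cmp_to_key(chain(by_len, by_alpha))))  # ['a', 'ab', 'bb']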
def list_sort_anagram(arr):
    return sorted(arr, key=cmp_to_key(comparison))
        return -1 if a.name < b.name else 1

names = ['charlie', 'abby', 'bob', 'derek']
players = []
for x in names:
    players.append(Player(x, names.index(x)))
players.append(Player('amy', 3))

for player in players:
    print(player)
print()

players.sort(key=cmp_to_key(Player.comparator))
for player in players:
    print(player)

# # DEFINE A SORT METHOD
# def sort_item(item):
#     return item[1]
#
# items.sort(key=sort_item)
# print(items)
#
# # LAMBDA FUNCTION
# items.sort(key=lambda item: item[1], reverse=True)
# print(items)
def collect_output(
        self,
        schema,                 # type: Dict[Text, Any]
        builder,                # type: Builder
        outdir,                 # type: Text
        fs_access,              # type: StdFsAccess
        compute_checksum=True   # type: bool
):
    # type: (...) -> Optional[Union[Dict[Text, Any], List[Union[Dict[Text, Any], Text]]]]
    r = []  # type: List[Any]
    empty_and_optional = False
    debug = _logger.isEnabledFor(logging.DEBUG)
    if "outputBinding" in schema:
        binding = schema["outputBinding"]
        globpatterns = []  # type: List[Text]

        revmap = partial(revmap_file, builder, outdir)

        if "glob" in binding:
            with SourceLine(binding, "glob", WorkflowException, debug):
                for gb in aslist(binding["glob"]):
                    gb = builder.do_eval(gb)
                    if gb:
                        globpatterns.extend(aslist(gb))

                for gb in globpatterns:
                    if gb.startswith(builder.outdir):
                        gb = gb[len(builder.outdir) + 1:]
                    elif gb == ".":
                        gb = outdir
                    elif gb.startswith("/"):
                        raise WorkflowException(
                            "glob patterns must not start with '/'")
                    try:
                        prefix = fs_access.glob(outdir)
                        r.extend([{
                            "location": g,
                            "path": fs_access.join(builder.outdir,
                                                   g[len(prefix[0]) + 1:]),
                            "basename": os.path.basename(g),
                            "nameroot": os.path.splitext(os.path.basename(g))[0],
                            "nameext": os.path.splitext(os.path.basename(g))[1],
                            "class": "File" if fs_access.isfile(g) else "Directory"
                        } for g in sorted(
                            fs_access.glob(fs_access.join(outdir, gb)),
                            key=cmp_to_key(cast(Callable[[Text, Text], int],
                                                locale.strcoll)))])
                    except (OSError, IOError) as e:
                        _logger.warning(Text(e))
                    except Exception:
                        _logger.error("Unexpected error from fs_access",
                                      exc_info=True)
                        raise

        for files in r:
            rfile = files.copy()
            revmap(rfile)
            if files["class"] == "Directory":
                ll = schema.get("loadListing") or builder.loadListing
                if ll and ll != "no_listing":
                    get_listing(fs_access, files, (ll == "deep_listing"))
            else:
                if binding.get("loadContents"):
                    with fs_access.open(rfile["location"], "rb") as f:
                        files["contents"] = \
                            content_limit_respected_read_bytes(f).decode("utf-8")
                if compute_checksum:
                    with fs_access.open(rfile["location"], "rb") as f:
                        checksum = hashlib.sha1()  # nosec
                        contents = f.read(1024 * 1024)
                        while contents != b"":
                            checksum.update(contents)
                            contents = f.read(1024 * 1024)
                        files["checksum"] = "sha1$%s" % checksum.hexdigest()
                files["size"] = fs_access.size(rfile["location"])

        optional = False
        single = False
        if isinstance(schema["type"], MutableSequence):
            if "null" in schema["type"]:
                optional = True
            if "File" in schema["type"] or "Directory" in schema["type"]:
                single = True
        elif schema["type"] == "File" or schema["type"] == "Directory":
            single = True

        if "outputEval" in binding:
            with SourceLine(binding, "outputEval", WorkflowException, debug):
                r = builder.do_eval(binding["outputEval"], context=r)

        if single:
            if not r and not optional:
                with SourceLine(binding, "glob", WorkflowException, debug):
                    raise WorkflowException(
                        "Did not find output file with glob pattern: '{}'".format(
                            globpatterns))
            elif not r and optional:
                pass
            elif isinstance(r, MutableSequence):
                if len(r) > 1:
                    raise WorkflowException(
                        "Multiple matches for output item that is a single file.")
                else:
                    r = r[0]

        if "secondaryFiles" in schema:
            with SourceLine(schema, "secondaryFiles", WorkflowException, debug):
                for primary in aslist(r):
                    if isinstance(primary, MutableMapping):
                        primary.setdefault("secondaryFiles", [])
                        pathprefix = primary["path"][0:primary["path"].rindex("/") + 1]
                        for sf in aslist(schema["secondaryFiles"]):
                            if 'required' in sf:
                                sf_required = builder.do_eval(sf['required'],
                                                              context=primary)
                            else:
                                sf_required = False
                            if "$(" in sf["pattern"] or "${" in sf["pattern"]:
                                sfpath = builder.do_eval(sf["pattern"],
                                                         context=primary)
                            else:
                                sfpath = substitute(primary["basename"],
                                                    sf["pattern"])
                            for sfitem in aslist(sfpath):
                                if not sfitem:
                                    continue
                                if isinstance(sfitem, string_types):
                                    sfitem = {"path": pathprefix + sfitem}
                                if not fs_access.exists(sfitem['path']) \
                                        and sf_required:
                                    raise WorkflowException(
                                        "Missing required secondary file '%s'" %
                                        (sfitem["path"]))
                                if "path" in sfitem and "location" not in sfitem:
                                    revmap(sfitem)
                                if fs_access.isfile(sfitem["location"]):
                                    sfitem["class"] = "File"
                                    primary["secondaryFiles"].append(sfitem)
                                elif fs_access.isdir(sfitem["location"]):
                                    sfitem["class"] = "Directory"
                                    primary["secondaryFiles"].append(sfitem)

        if "format" in schema:
            for primary in aslist(r):
                primary["format"] = builder.do_eval(schema["format"],
                                                    context=primary)

        # Ensure files point to local references outside of the run environment
        adjustFileObjs(r, revmap)

        if not r and optional:
            # Don't convert zero or empty string to None
            if r in [0, '']:
                return r
            # For [] or None, return None
            else:
                return None

    if (not empty_and_optional
            and isinstance(schema["type"], MutableMapping)
            and schema["type"]["type"] == "record"):
        out = {}
        for f in schema["type"]["fields"]:
            out[shortname(f["name"])] = self.collect_output(  # type: ignore
                f, builder, outdir, fs_access,
                compute_checksum=compute_checksum)
        return out
    return r
from functools import cmp_to_key
import sys


def point_compare(a: tuple, b: tuple):
    if a[0] == b[0]:
        return a[1] - b[1]
    return a[0] - b[0]


input = sys.stdin.readline

n = int(input())
points = list()
for i in range(n):
    points.append(tuple(map(int, input().split())))

for p in sorted(points, key=cmp_to_key(point_compare)):
    print(f'{p[0]} {p[1]}')