def test__cmp__(self):
    a = Value('2 [meter / sec]')
    b = Value('2 [meter / sec]')
    c = Value('3 [meter / sec]')
    self.assertEqual(0, cmp(a, b))
    self.assertLess(0, cmp(c, a))
    self.assertGreater(0, cmp(a, c))
def __cmp__(self, y):
    """Compare two duration objects"""
    if isinstance(y, duration):
        return cmp(self.value, y.value)
    elif isinstance(y, (int, float)):
        return cmp(self.value, y)
    else:
        raise DurationComparisonError
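Editor's note: `__cmp__` is never called in Python 3. A minimal sketch of the same dispatch using rich comparisons and `functools.total_ordering`; the `Duration` and `DurationComparisonError` names are stand-ins for the original class, not its actual definition.

from functools import total_ordering

class DurationComparisonError(TypeError):
    """Stand-in for the original exception type."""

@total_ordering
class Duration(object):
    def __init__(self, value):
        self.value = value

    def _coerce(self, y):
        # mirrors the cmp() dispatch in the Py2 version above
        if isinstance(y, Duration):
            return y.value
        elif isinstance(y, (int, float)):
            return y
        raise DurationComparisonError()

    def __eq__(self, y):
        return self.value == self._coerce(y)

    def __lt__(self, y):
        return self.value < self._coerce(y)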
def asc():
    if sort_by != 'modified' and type(ent1) is not type(ent2):
        return 1 if type(ent1) is File else -1
    else:
        try:
            return cmp(getattr(ent1, sort_by), getattr(ent2, sort_by))
        except AttributeError:
            return cmp(getattr(ent1, 'name'), getattr(ent2, 'name'))
def xpath_cmp(x, y):
    # For the moment, we aren't going to worry about repeating
    # nodes.
    new_x = re.sub(r"\[\d+\]", u"", x)
    new_y = re.sub(r"\[\d+\]", u"", y)
    if new_x == new_y:
        return cmp(x, y)
    if new_x not in self._xpaths and new_y not in self._xpaths:
        return 0
    elif new_x not in self._xpaths:
        return 1
    elif new_y not in self._xpaths:
        return -1
    return cmp(self._xpaths.index(new_x), self._xpaths.index(new_y))
def comparer(left, right):
    for fn, mult in comparers:
        result = cmp(fn(left), fn(right))
        if result:
            return mult * result
    else:
        return 0
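A sketch of the same multi-key pattern without `cmp()`, for Python 3: `(a > b) - (a < b)` reproduces `cmp`'s -1/0/1, and `functools.cmp_to_key` adapts the comparator for `sort()`. The `comparers` spec below is a hypothetical example, not the original's.

from functools import cmp_to_key
from operator import itemgetter

# hypothetical multi-key spec: field 0 ascending, field 1 descending
comparers = [(itemgetter(0), 1), (itemgetter(1), -1)]

def comparer_py3(left, right):
    for fn, mult in comparers:
        a, b = fn(left), fn(right)
        result = (a > b) - (a < b)  # standard cmp() replacement
        if result:
            return mult * result
    return 0

rows = [(1, 'b'), (1, 'a'), (0, 'z')]
rows.sort(key=cmp_to_key(comparer_py3))
assert rows == [(0, 'z'), (1, 'b'), (1, 'a')]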
def technique_cmp(self, a, b):
    # a1 = self.old_best_results[a]
    # a2 = self.best_results[a]
    # b1 = self.old_best_results[b]
    # b2 = self.best_results[b]
    # if a1 is None and b1 is None:
    #     return 0
    # if a1 is None:
    #     return -1
    # if b1 is None:
    #     return 1
    # return self.driver.objective.project_compare(a1, a2, b1, b2, self.factor)

    # not ready techniques go to the back
    if not a.is_ready() or not b.is_ready():
        return cmp(b.is_ready(), a.is_ready())

    a = self.best_results[a]
    b = self.best_results[b]
    if a is None and b is None:
        return 0
    if a is None:
        return -1
    if b is None:
        return 1
    return self.driver.objective.compare(a, b)
def qbe_forest(graph, nodes):
    forest = []
    for node, edges in graph.items():
        tree, are_all = qbe_tree(graph, copy(nodes), root=node)
        if are_all and tree not in forest:
            forest.append(tree)
    return sorted(forest, cmp=lambda x, y: cmp(len(x), len(y)))
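Note that the `cmp=` keyword of `sorted()`/`list.sort()` was removed in Python 3. Since this comparator orders by length only, a key function is the direct replacement; the general porting pattern for an arbitrary comparator goes through `functools.cmp_to_key`. A small self-contained sketch:

from functools import cmp_to_key

trees = [[1, 2, 3], [1], [1, 2]]
assert sorted(trees, key=len) == sorted(
    trees, key=cmp_to_key(lambda x, y: (len(x) > len(y)) - (len(x) < len(y))))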
def _sort_by_tzoffset(a_offset, b_offset):
    # Transform if it's a tuple
    if isinstance(a_offset, tuple):
        a_offset = a_offset[0]
        b_offset = b_offset[0]

    def split(offset):
        match = re.match(r'([+-])(\d\d)(\d\d)', offset)
        return match.group(1) == '-', int(match.group(2)), int(match.group(3))

    a_negative, a_hours, a_minutes = split(a_offset)
    b_negative, b_hours, b_minutes = split(b_offset)

    if a_hours == 0 and b_hours != 0:
        return 1
    if a_negative and not b_negative:
        return -1
    if not a_negative and b_negative:
        return 1
    if a_negative and b_negative:
        a_hours = -1 * a_hours
        b_hours = -1 * b_hours

    if a_hours > b_hours:
        return 1
    elif a_hours == b_hours:
        return cmp(a_minutes, b_minutes)
    else:
        return -1
def makeDataProducts(self, files, unbanish=False, unignore=False):
    """makes a list of DPs from a list of (filename,quiet) pairs.
    If unbanish is False, DPs with a default "banish" policy will be skipped.
    Symlinks will be resolved, and non-unique filenames removed from list.
    """
    paths = set()
    dps = []
    for filename, quiet in files:
        filename = filename.rstrip('/')
        sourcepath = Purr.canonizePath(filename)
        if sourcepath not in paths:
            paths.add(sourcepath)
            filename = os.path.basename(filename)
            policy, filename, comment = self._default_dp_props.get(filename, ("copy", filename, ""))
            dprintf(4, "%s: default policy is %s,%s,%s\n", sourcepath, policy, filename, comment)
            if policy == "banish":
                if unbanish:
                    policy = "copy"
                else:
                    continue
            if unignore and policy == "ignore":
                policy = "copy"
            dps.append(Purr.DataProduct(filename=filename, sourcepath=sourcepath,
                                        policy=policy, comment=comment, quiet=quiet))
    return sorted(dps, lambda a, b: cmp(a.filename, b.filename))
def intersect(self, other):
    e1 = self.edge
    e2 = other.edge
    if (e1 is None) or (e2 is None):
        return None

    # if the two edges bisect the same parent return None
    if e1.reg[1] is e2.reg[1]:
        return None

    d = e1.a * e2.b - e1.b * e2.a
    if isEqual(d, 0.0):
        return None

    xint = (e1.c * e2.b - e2.c * e1.b) / d
    yint = (e2.c * e1.a - e1.c * e2.a) / d
    if cmp(e1.reg[1], e2.reg[1]) < 0:
        he = self
        e = e1
    else:
        he = other
        e = e2

    rightOfSite = xint >= e.reg[1].x
    if ((rightOfSite and he.pm == Edge.LE) or
            (not rightOfSite and he.pm == Edge.RE)):
        return None

    # create a new site at the point of intersection - this is a new
    # vector event waiting to happen
    return Site(xint, yint)
def __cmp__(self, other):
    if isinstance(other, basestring):
        other = FirefoxVersion(other)

    compare = cmp(self.version, other.version)
    # Numeric versions don't match, so just return the cmp() result.
    if compare:
        return compare

    # The versions are the same, so compare the "postreleases".
    if self.postrelease and not other.postrelease:
        return 1
    elif not self.postrelease and other.postrelease:
        return -1
    # If there is ever another postrelease, logic will need to be
    # added here to provide comparison for them.

    # The postreleases are the same, so compare the "prereleases".
    # case 1: neither has prerelease; they're equal
    # case 2: self has prerelease, other doesn't; other is greater
    # case 3: self doesn't have prerelease, other does: self is greater
    # case 4: both have prerelease: must compare them!
    if not self.prerelease and not other.prerelease:
        return 0
    elif self.prerelease and not other.prerelease:
        return -1
    elif not self.prerelease and other.prerelease:
        return 1
    else:
        prereleases = ('a', '(beta)', 'b', 'pre')
        prerelease_compare = cmp(prereleases.index(self.prerelease[0]),
                                 prereleases.index(other.prerelease[0]))
        if not prerelease_compare:
            if self.prerelease[1] is None and other.prerelease[1] is not None:
                return 1
            elif self.prerelease[1] is not None and other.prerelease[1] is None:
                return -1
            else:
                return cmp(self.prerelease[1], other.prerelease[1])
        else:
            return prerelease_compare
def count_compare(x, y):
    (x_id, x_desc) = x
    (y_id, y_desc) = y
    if type(x_desc) is DictType and "order" in x_desc:
        x_order = x_desc["order"]
    else:
        x_order = -1
    if type(y_desc) is DictType and "order" in y_desc:
        y_order = y_desc["order"]
    else:
        y_order = -1
    if x_order >= 0 and y_order >= 0:
        return cmp(x_order, y_order)
    else:
        return cmp(x_id, y_id)
def _get_attributes(self, attributes):
    """Build a SpecificationAttribute for each entry, sorted by name."""
    ret = []
    for data in attributes:
        ret.append(SpecificationAttribute(specification=self, data=data))
    return sorted(ret, lambda x, y: cmp(x.name, y.name))
def __cmp__(self, frec2):
    """
    Compare the present weighted values of two Frecency objects.

    **Note:** Comparison is of doubtful value between Frecencies with
    different timescales. A Warning will trigger if this is done, unless
    suppress_warnings is True.

    If fast_comparisons == True, timescale and time0 are assumed to be
    equal without checking.
    If fast_comparisons == False, a completely general comparison of
    present weights is used.
    """
    if self.fast_comparisons:
        return cmp(self.log2_value, frec2.log2_value)
    else:
        if self.timescale != frec2.timescale:
            if not self.suppress_warnings:
                warnings.warn("Different frecency timescales, {} vs. {}. "
                              "Comparison may be meaningless.".format(self.timescale, frec2.timescale))
        present_weight1 = self.get_present_weight()
        present_weight2 = frec2.get_present_weight()
        return cmp(present_weight1, present_weight2)
def testseq_spaninsert(self):
    n = 5000
    t = avl.new(compare=lambda x, y: cmp(y, x))
    for i in range(2):
        for i in gen_ints(0, 3 * n):
            e = random.randint(-n, n)
            a1, a2 = t.span(e)
            t.insert(e, a2)
            self.assertTrue(t.span(e) == (a1, a2 + 1))
        self.assertTrue(verify_len(t, 3 * n))
        t.clear()
def insert(self, he, site, offset):
    he.vertex = site
    he.ystar = site.y + offset
    last = self.hash[self.getBucket(he)]
    next = last.qnext
    while (next is not None) and cmp(he, next) > 0:
        last = next
        next = last.qnext
    he.qnext = last.qnext
    last.qnext = he
    self.count += 1
def unique(events, deltat=10., group_cmp=(lambda a, b: cmp(a.catalog, b.catalog))):
    groups = Event.grouped(events, deltat)
    events = []
    for group in groups:
        if group:
            group.sort(group_cmp)
            events.append(group[-1])
    return events
def choicelist_choices():
    """Return a list of all choicelists defined for this application."""
    l = []
    for k, v in list(CHOICELISTS.items()):
        if v.verbose_name_plural is None:
            text = v.__name__
        else:
            text = v.verbose_name_plural
        l.append((k, text))
    l.sort(lambda a, b: cmp(a[0], b[0]))
    return l
def get_tax_template(posting_date, args):
    """Get matching tax rule"""
    args = frappe._dict(args)
    conditions = ["""(from_date is null or from_date <= '{0}')
        and (to_date is null or to_date >= '{0}')""".format(posting_date)]

    for key, value in iteritems(args):
        if key == "use_for_shopping_cart":
            conditions.append("use_for_shopping_cart = {0}".format(1 if value else 0))
        if key == 'customer_group':
            if not value:
                value = get_root_of("Customer Group")
            customer_group_condition = get_customer_group_condition(value)
            conditions.append("ifnull({0}, '') in ('', {1})".format(key, customer_group_condition))
        else:
            conditions.append("ifnull({0}, '') in ('', '{1}')".format(key, frappe.db.escape(cstr(value))))

    tax_rule = frappe.db.sql("""select * from `tabTax Rule`
        where {0}""".format(" and ".join(conditions)), as_dict=True)

    if not tax_rule:
        return None

    for rule in tax_rule:
        rule.no_of_keys_matched = 0
        for key in args:
            if rule.get(key):
                rule.no_of_keys_matched += 1

    rule = sorted(tax_rule,
                  key=functools.cmp_to_key(
                      lambda b, a: cmp(a.no_of_keys_matched, b.no_of_keys_matched)
                      or cmp(a.priority, b.priority)))[0]

    tax_template = rule.sales_tax_template or rule.purchase_tax_template
    doctype = "{0} Taxes and Charges Template".format(rule.tax_type)
    if frappe.db.get_value(doctype, tax_template, 'disabled') == 1:
        return None

    return tax_template
def getAttributeNames(object, includeMagic=1, includeSingle=1, includeDouble=1):
    """Return list of unique attributes, including inherited, for object."""
    attributes = []
    dict = {}
    if not hasattrAlwaysReturnsTrue(object):
        # Add some attributes that don't always get picked up.
        special_attrs = ['__bases__', '__class__', '__dict__', '__name__',
                         'func_closure', 'func_code', 'func_defaults',
                         'func_dict', 'func_doc', 'func_globals', 'func_name']
        attributes += [attr for attr in special_attrs if hasattr(object, attr)]
    if includeMagic:
        try:
            attributes += object._getAttributeNames()
        except Exception:
            pass
    # Get all attribute names.
    str_type = str(type(object))
    if str_type == "<type 'array'>":
        attributes += dir(object)
    else:
        attrdict = getAllAttributeNames(object)
        # Store the object's dir.
        object_dir = dir(object)
        for (obj_type_name, technique, count), attrlist in list(attrdict.items()):
            # This complexity is necessary to avoid accessing all the
            # attributes of the object. This is very handy for objects
            # whose attributes are lazily evaluated.
            if type(object).__name__ == obj_type_name and technique == 'dir':
                attributes += attrlist
            else:
                attributes += [attr for attr in attrlist
                               if attr not in object_dir and hasattr(object, attr)]
    # Remove duplicates from the attribute list.
    for item in attributes:
        dict[item] = None
    attributes = list(dict.keys())
    # new-style swig wrappings can result in non-string attributes
    # e.g. ITK http://www.itk.org/
    attributes = [attribute for attribute in attributes if type(attribute) == str]
    attributes.sort(lambda x, y: cmp(x.upper(), y.upper()))
    if not includeSingle:
        attributes = [item for item in attributes
                      if item[0] != '_' or item[1:2] == '_']
    if not includeDouble:
        attributes = [item for item in attributes if item[:2] != '__']
    return attributes
def _get_apis(self, apis):
    """ Process apis for the given model

        Args:
            apis: the list of apis available for the current model
    """
    ret = []
    for data in apis:
        ret.append(SpecificationAPI(specification=self, data=data))
    return sorted(ret, lambda x, y: cmp(x.rest_name, y.rest_name))
def autocomplete_graph(admin_site, current_models, directed=False):
    graph = qbe_graph(admin_site, directed=directed)
    valid_paths = []
    for c, d in combinations(current_models, 2):
        paths = find_minimal_paths(graph, c, d)
        combined_sets = combine(paths)
        for combined_set in combined_sets:
            path = graphs_join(combined_set)
            valid_paths.append(path)
        # for path in paths:
        #     if all(map(lambda x: x in path, current_models)):
        #         if path not in valid_paths:
        #             valid_paths.append(path)
    return sorted(valid_paths, cmp=lambda x, y: cmp(len(x), len(y)))
def compare_accounts(a, b):
    if is_root:
        if a.report_type != b.report_type and a.report_type == "Balance Sheet":
            return -1
        if a.root_type != b.root_type and a.root_type == "Asset":
            return -1
        if a.root_type == "Liability" and b.root_type == "Equity":
            return -1
        if a.root_type == "Income" and b.root_type == "Expense":
            return -1
    else:
        if re.split('\W+', a[key])[0].isdigit():
            # if chart of accounts is numbered, then sort by number
            return cmp(a[key], b[key])
    return 1
def get_default_contact(doctype, name):
    '''Returns default contact for the given doctype, name'''
    out = frappe.db.sql('''select parent,
            (select is_primary_contact from tabContact c where c.name = dl.parent)
                as is_primary_contact
        from
            `tabDynamic Link` dl
        where
            dl.link_doctype=%s and
            dl.link_name=%s and
            dl.parenttype = "Contact"''', (doctype, name))

    if out:
        return sorted(out, key=functools.cmp_to_key(lambda x, y: cmp(y[1], x[1])))[0][0]
    else:
        return None
def testCluster(self):
    data = cp.read_data(uuid=self.testUUID)
    # Test to make sure empty dataset doesn't crash the program
    clusters, labels, new_data = cp.cluster([], 10)
    self.assertTrue(len(new_data) == clusters == len(labels) == 0)
    # Test to make sure clustering with noise works
    clusters, labels, new_data = cp.cluster(data, 10)
    self.assertEqual(len(labels), len(new_data))
    self.assertEqual(cmp(new_data, data), 0)
    # Test to make sure clustering without noise works
    data, bins = cp.remove_noise(data, self.RADIUS)
    clusters, labels, new_data = cp.cluster(data, len(bins))
    self.assertTrue(clusters == 0 or len(bins) <= clusters <= len(bins) + 10)
def compareFloats(a, b, rtol=1.0e-5, atol=RO.SysConst.FAccuracy):
    """Compares values a and b.

    Returns 0 if the values are approximately equal, i.e.:
    - |a - b| < atol + (rtol * |a + b|)
    Else 1 if a > b, -1 if a < b

    Inputs:
    - a, b: scalars to be compared (int or float)
    - atol: absolute tolerance
    - rtol: relative tolerance

    The algorithm used is the same one used by numpy.allclose.
    """
    if abs(a - b) < (atol + (rtol * abs(float(a + b)))):
        return 0
    return cmp(a, b)
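For Python 3, `math.isclose` offers a similar tolerance test; a sketch of a port follows. Caveats: `isclose` scales `rel_tol` by max(|a|, |b|) whereas `compareFloats` uses |a + b|, so results can differ near the tolerance boundary, and the `atol` default here is an assumed placeholder for `RO.SysConst.FAccuracy`.

import math

def compare_floats_py3(a, b, rtol=1.0e-5, atol=1e-8):
    # 0 when approximately equal, else -1/1 like cmp(a, b)
    if math.isclose(a, b, rel_tol=rtol, abs_tol=atol):
        return 0
    return (a > b) - (a < b)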
def get_help_messages():
    '''Return help messages for the desktop (called via `get_help_messages` hook)

    Format for message:
        {
            title: _('Add Employees to Manage Them'),
            description: _('Add your Employees so you can manage leaves, expenses and payroll'),
            action: 'Add Employee',
            route: 'List/Employee'
        }
    '''
    messages = []
    for fn in frappe.get_hooks('get_help_messages'):
        messages += frappe.get_attr(fn)()

    return sorted(messages, key=functools.cmp_to_key(lambda a, b: cmp(a.get('count'), b.get('count'))))
def getRenderers(filename):
    """For a given DP, returns a list of renderer ids giving the renderers
    that support the source file type"""
    global available_renderers
    renderers = []
    for rdrid, (renderer, module) in available_renderers.items():
        try:
            priority = renderer.canRender(filename)
        except:
            print("""Error in renderer: %s.canRender("%s"):""" % (rdrid, filename))
            traceback.print_exc()
            priority = None
        if priority:
            renderers.append((priority, rdrid))
    # sort by priority
    renderers.sort(lambda a, b: cmp(a[0], b[0]))
    # return list of IDs. Note that "none" should always be available and working
    return [a[1] for a in renderers] or ["link"]
def get_default_address(doctype, name, sort_key='is_primary_address'):
    '''Returns default Address name for the given doctype, name'''
    if sort_key not in ['is_shipping_address', 'is_primary_address']:
        return None

    out = frappe.db.sql("""
        SELECT addr.name, addr.%s
        FROM `tabAddress` addr, `tabDynamic Link` dl
        WHERE dl.parent = addr.name and dl.link_doctype = %s and
            dl.link_name = %s and ifnull(addr.disabled, 0) = 0
        """ % (sort_key, '%s', '%s'), (doctype, name))

    if out:
        return sorted(out, key=functools.cmp_to_key(lambda x, y: cmp(y[1], x[1])))[0][0]
    else:
        return None
def add_ordered_combo_item(
        combo, text, data=None, count_selected_features=None, icon=None):
    """Add a combo item ensuring that all items are listed alphabetically.

    Although QComboBox allows you to set an InsertAlphabetically enum
    this only has effect when a user interactively adds combo items to
    an editable combo. Thus we have this little function to ensure that
    combos are always sorted alphabetically.

    :param combo: Combo box receiving the new item.
    :type combo: QComboBox

    :param text: Display text for the combo.
    :type text: str

    :param data: Optional UserRole data to be associated with the item.
    :type data: QVariant, str

    :param count_selected_features: A count to display if the layer has some
        selected features. Default to None, nothing will be displayed.
    :type count_selected_features: None, int

    :param icon: Icon to display in the combobox.
    :type icon: QIcon
    """
    if count_selected_features is not None:
        text += ' (' + tr('{count} selected features').format(
            count=count_selected_features) + ')'
    size = combo.count()
    for combo_index in range(0, size):
        item_text = combo.itemText(combo_index)
        # see if text alphabetically precedes item_text
        if cmp(text.lower(), item_text.lower()) < 0:
            if icon:
                combo.insertItem(combo_index, icon, text, data)
            else:
                combo.insertItem(combo_index, text, data)
            return
    # otherwise just add it to the end
    if icon:
        combo.insertItem(size, icon, text, data)
    else:
        combo.insertItem(size, text, data)
def orderby_cmp(x, y):
    if x['ordertype'] == y['ordertype']:
        return cmp(Decimal(x['cjfee']), Decimal(y['cjfee']))
    return cmp(offername_list.index(x['ordertype']),
               offername_list.index(y['ordertype']))
def init_vm(vmname="$VM_NAME", vmtype="$VM_TYPE", autodelete=True, reuse_boot=True,
            autodelete_boot=None, propagate=True, **kw):
    """Creates a GCE VM instance.
    vmname: instance name
    vmtype: VM type (use gce.list_machine_types() to get a list)
    autodelete: if True, deletes any existing instance with same name
    reuse_boot: if True and a boot disk with same name exists, reuses that
    autodelete_boot: if True, boot disk will be auto-deleted when VM is shut down
    propagate: if True, calls propagate_scripts() to copy Pyxis scripts from current
        directory to VM. Can be set to a directory name to propagate to a specific
        directory on the remote machine.
    attach_XXX: create additional disks. Use attach_XXX=N to create a disk of N Gb
        and attach to VM under /XXX (equivalent to calling attach_disk('XXX',size=N)).
        attach_XXX=dict(...) will call attach_disk('XXX',...)
    """
    name, vmtype = interpolate_locals("vmname vmtype")
    # check if VM exists and needs to be deleted
    if autodelete and name in get_vms():
        warn("deleting existing VM $name")
        delete_vm(name, disks=False)
    # check if a boot disk needs to be created
    disks = get_disks()
    if name in disks:
        if reuse_boot:
            info("boot disk $name already exists, reusing (disable with reuse_boot=False)")
            if autodelete_boot is None:
                info("boot disk $name will not be auto-deleted when VM is destroyed")
                autodelete_boot = False
        else:
            gc("disks delete $name --quiet")
            del disks[name]
    if name not in disks:
        snapshot = VM_SNAPSHOT
        if '*' in snapshot:
            from past.builtins import cmp
            from functools import cmp_to_key
            matching = sorted(
                list(get_snapshots(snapshot).keys()),
                key=cmp_to_key(lambda a, b: -cmp(_version_suffix(a), _version_suffix(b))))
            snapshot = matching[0]
            info("using latest snapshot $snapshot")
        gc("disks create $name --source-snapshot $snapshot")
        if autodelete_boot is None:
            info("boot disk $name will be auto-deleted when VM is destroyed")
            autodelete_boot = True
    # create VM
    scopes = "--scopes storage-rw"
    gc("instances create $name --machine-type $vmtype "
       "--disk name=$name mode=rw boot=yes auto-delete=%s $scopes" %
       ("yes" if autodelete_boot else "no"))
    info("created VM instance $name, type $vmtype")
    # run provisioning script
    provision_vm(name)
    # attach disks
    for key, value in kw.items():
        if key.startswith("attach_"):
            if isinstance(value, dict):
                attach_disk(key[len("attach_"):], vmname=vmname, **value)
            elif isinstance(value, int):
                attach_disk(key[len("attach_"):], size=value, vmname=vmname)
            else:
                raise TypeError("unknown data type for %s" % key)
    # provision with pyxis scripts in specified directory
    if propagate:
        propagate_scripts(name, dir=propagate if isinstance(propagate, str) else "")
def test_rect(self):
    element = self.get_control()
    self.assertEqual(0, cmp(element.rect, [0, 1, 2, 3]))
def sort_result_list(a, b):
    result = -cmp(a.severity, b.severity)
    if result == 0:
        result = cmp(a.summary, b.summary)
    return result
def __cmp__(self, other):
    return cmp(int(self.value), int(other))
def compare_cb_op_as_hkl(a, b):
    # FIXME Py2-style compare, wrap in functools.cmp_to_key
    if len(a) < len(b):
        return -1
    if len(a) > len(b):
        return 1
    from past.builtins import cmp
    return cmp(a, b)
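Acting on the FIXME above, a usage sketch: the Py2-style comparator is wrapped with `functools.cmp_to_key` at the call site. The `ops` list is hypothetical data, and the `past` compatibility package is assumed to supply `cmp`.

from functools import cmp_to_key

ops = ['k,h,-l', '-h,-k,-l', 'h,k,l']
# shortest first, ties broken lexicographically
ops.sort(key=cmp_to_key(compare_cb_op_as_hkl))
assert ops == ['h,k,l', 'k,h,-l', '-h,-k,-l']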
def mycmp(x, y):
    return cmp(float(x[1]), float(y[1]))
def __cmp__(self, other):
    return cmp(self.n, other.n)
def brentmin(xlow, xupp, Nitmax, tol, f, nout=None, *args):
    '''
    Brent's minimization method in one dimension. Given a function f and a
    search interval, this routine isolates the minimum to a fractional
    precision of about tol using Brent's method.

    Reference: Section 10.2 Parabolic Interpolation and Brent's Method in
    One Dimension; Press, Teukolsky, Vetterling & Flannery, Numerical
    Recipes in C, Cambridge University Press, 2002.

    This is a python implementation of gpml functionality (Copyright (c) by
    Hannes Nickisch 2010-01-10).

    xmin,fmin,funccount,varargout = BRENTMIN(xlow,xupp,Nit,tol,f,nout,varargin)

    :param xlow: lower bound, i.e. search interval such that xlow<=xmin<=xupp
    :param xupp: upper bound, i.e. search interval such that xlow<=xmin<=xupp
    :param Nitmax: maximum number of function evaluations made by the routine
    :param tol: fractional precision
    :param f: [y,varargout{:}] = f(x,varargin{:}) is the function
    :param nout: no. of outputs of f (in varargout) in addition to the y value
    :return: fmin is the minimal function value, xmin is the corresponding
        abscissa value, funccount is the number of function evaluations made,
        varargout holds additional outputs of f at the optimum.
    '''
    if nout is None:
        nout = 0
    eps = sys.float_info.epsilon
    # tolerance is no smaller than machine's floating point precision
    tol = max(tol, eps)

    # Evaluate endpoints
    vargout = f(xlow, *args)
    fa = vargout[0][0]
    vargout = f(xupp, *args)
    fb = vargout[0][0]
    funccount = 2  # number of function evaluations

    # Compute the start point
    seps = sqrt(eps)
    c = 0.5 * (3.0 - sqrt(5.0))  # golden ratio
    a = xlow
    b = xupp
    v = a + c * (b - a)
    w = v
    xf = v
    d = 0.
    e = 0.
    x = xf
    vargout = f(x, *args)
    fx = vargout[0][0]
    varargout = vargout[1:]
    funccount += 1

    fv = fx
    fw = fx
    xm = 0.5 * (a + b)
    tol1 = seps * abs(xf) + old_div(tol, 3.0)
    tol2 = 2.0 * tol1

    # Main loop
    while abs(xf - xm) > (tol2 - 0.5 * (b - a)):
        gs = True
        # Is a parabolic fit possible
        if abs(e) > tol1:
            # Yes, so fit parabola
            gs = False
            r = (xf - w) * (fx - fv)
            q = (xf - v) * (fx - fw)
            p = (xf - v) * q - (xf - w) * r
            q = 2.0 * (q - r)
            if q > 0.0:
                p = -p
            q = abs(q)
            r = e
            e = d
            # Is the parabola acceptable
            if (abs(p) < abs(0.5 * q * r)) and (p > q * (a - xf)) and (p < q * (b - xf)):
                # Yes, parabolic interpolation step
                d = old_div(p, q)
                x = xf + d
                # f must not be evaluated too close to ax or bx
                if ((x - a) < tol2) or ((b - x) < tol2):
                    si = cmp(xm - xf, 0)
                    if (xm - xf) == 0:
                        si += 1
                    d = tol1 * si
            else:
                # Not acceptable, must do a golden section step
                gs = True
        if gs:
            # A golden-section step is required
            if xf >= xm:
                e = a - xf
            else:
                e = b - xf
            d = c * e
        # The function must not be evaluated too close to xf
        si = cmp(d, 0)
        if d == 0:
            si += 1
        x = xf + si * max(abs(d), tol1)
        vargout = f(x, *args)
        fu = vargout[0][0]
        varargout = vargout[1:]
        funccount += 1

        # Update a, b, v, w, x, xm, tol1, tol2
        if fu <= fx:
            if x >= xf:
                a = xf
            else:
                b = xf
            v = w
            fv = fw
            w = xf
            fw = fx
            xf = x
            fx = fu
        else:  # fu > fx
            if x < xf:
                a = x
            else:
                b = x
            if (fu <= fw) or (w == xf):
                v = w
                fv = fw
                w = x
                fw = fu
            elif (fu <= fv) or ((v == xf) or (v == w)):
                v = x
                fv = fu
        xm = 0.5 * (a + b)
        tol1 = seps * abs(xf) + old_div(tol, 3.0)
        tol2 = 2.0 * tol1

        if funccount >= Nitmax:
            # typically we should not get here
            logging.getLogger(__name__).warning("Specified number of function evaluation reached")
            break

    # check that endpoints are less than the minimum found
    if (fa < fx) and (fa <= fb):
        xf = xlow
        fx = fa
    elif fb < fx:
        xf = xupp
        fx = fb

    fmin = fx
    xmin = xf
    vargout = [xmin, fmin, funccount]
    for vv in varargout:
        vargout.append(vv)
    return vargout
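A hypothetical usage sketch for brentmin: the objective must return its value nested so that `vargout[0][0]` is the scalar, matching the convention above (assumes the module-level imports the routine relies on: `sys`, `sqrt`, `old_div`, `logging`).

def quad(x):
    # value wrapped to satisfy the vargout[0][0] convention
    return [[(x - 2.0) ** 2]]

xmin, fmin, funccount = brentmin(0.0, 5.0, 100, 1e-8, quad)[:3]
# xmin should be close to 2.0 and fmin close to 0.0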
def voronoi(siteList, context):
    try:
        edgeList = EdgeList(siteList.xmin, siteList.xmax, len(siteList))
        priorityQ = PriorityQueue(siteList.ymin, siteList.ymax, len(siteList))
        siteIter = siteList.iterator()

        bottomsite = next(siteIter)
        context.outSite(bottomsite)
        newsite = next(siteIter)
        minpt = Site(-BIG_FLOAT, -BIG_FLOAT)
        while True:
            if not priorityQ.isEmpty():
                minpt = priorityQ.getMinPt()

            if newsite and (priorityQ.isEmpty() or cmp(newsite, minpt) < 0):
                # newsite is smallest - this is a site event
                context.outSite(newsite)

                # get first Halfedge to the LEFT and RIGHT of the new site
                lbnd = edgeList.leftbnd(newsite)
                rbnd = lbnd.right

                # if this halfedge has no edge, bot = bottom site (whatever that is)
                # create a new edge that bisects
                bot = lbnd.rightreg(bottomsite)
                edge = Edge.bisect(bot, newsite)
                context.outBisector(edge)

                # create a new Halfedge, setting its pm field to 0 and insert
                # this new bisector edge between the left and right vectors in
                # a linked list
                bisector = Halfedge(edge, Edge.LE)
                edgeList.insert(lbnd, bisector)

                # if the new bisector intersects with the left edge, remove
                # the left edge's vertex, and put in the new one
                p = lbnd.intersect(bisector)
                if p is not None:
                    priorityQ.delete(lbnd)
                    priorityQ.insert(lbnd, p, newsite.distance(p))

                # create a new Halfedge, setting its pm field to 1
                # insert the new Halfedge to the right of the original bisector
                lbnd = bisector
                bisector = Halfedge(edge, Edge.RE)
                edgeList.insert(lbnd, bisector)

                # if this new bisector intersects with the right Halfedge
                p = bisector.intersect(rbnd)
                if p is not None:
                    # push the Halfedge into the ordered linked list of vertices
                    priorityQ.insert(bisector, p, newsite.distance(p))

                newsite = next(siteIter)

            elif not priorityQ.isEmpty():
                # intersection is smallest - this is a vector (circle) event

                # pop the Halfedge with the lowest vector off the ordered list of
                # vectors. Get the Halfedge to the left and right of the above HE
                # and also the Halfedge to the right of the right HE
                lbnd = priorityQ.popMinHalfedge()
                llbnd = lbnd.left
                rbnd = lbnd.right
                rrbnd = rbnd.right

                # get the Site to the left of the left HE and to the right of
                # the right HE which it bisects
                bot = lbnd.leftreg(bottomsite)
                top = rbnd.rightreg(bottomsite)

                # output the triple of sites, stating that a circle goes through them
                mid = lbnd.rightreg(bottomsite)
                context.outTriple(bot, top, mid)

                # get the vertex that caused this event and set the vertex number
                # couldn't do this earlier since we didn't know when it would be processed
                v = lbnd.vertex
                siteList.setSiteNumber(v)
                context.outVertex(v)

                # set the endpoint of the left and right Halfedge to be this vector
                if lbnd.edge.setEndpoint(lbnd.pm, v):
                    context.outEdge(lbnd.edge)
                if rbnd.edge.setEndpoint(rbnd.pm, v):
                    context.outEdge(rbnd.edge)

                # delete the lowest HE, remove all vertex events to do with the
                # right HE and delete the right HE
                edgeList.delete(lbnd)
                priorityQ.delete(rbnd)
                edgeList.delete(rbnd)

                # if the site to the left of the event is higher than the Site
                # to the right of it, then swap them and set 'pm' to RIGHT
                pm = Edge.LE
                if bot.y > top.y:
                    bot, top = top, bot
                    pm = Edge.RE

                # Create an Edge (or line) that is between the two Sites. This
                # creates the formula of the line, and assigns a line number to it
                edge = Edge.bisect(bot, top)
                context.outBisector(edge)

                # create a HE from the edge
                bisector = Halfedge(edge, pm)

                # insert the new bisector to the right of the left HE
                # set one endpoint to the new edge to be the vector point 'v'
                # If the site to the left of this bisector is higher than the right
                # Site, then this endpoint is put in position 0; otherwise in pos 1
                edgeList.insert(llbnd, bisector)
                if edge.setEndpoint(Edge.RE - pm, v):
                    context.outEdge(edge)

                # if left HE and the new bisector don't intersect, then delete
                # the left HE, and reinsert it
                p = llbnd.intersect(bisector)
                if p is not None:
                    priorityQ.delete(llbnd)
                    priorityQ.insert(llbnd, p, bot.distance(p))

                # if right HE and the new bisector don't intersect, then reinsert it
                p = bisector.intersect(rrbnd)
                if p is not None:
                    priorityQ.insert(bisector, p, bot.distance(p))
            else:
                break

        he = edgeList.leftend.right
        while he is not edgeList.rightend:
            context.outEdge(he.edge)
            he = he.right
        Edge.EDGE_NUM = 0
    except Exception as err:
        # fix_print_with_import
        print("######################################################")
        # fix_print_with_import
        print(str(err))
def cmp_prob_val(self, a, b):
    if self[a] == self[b]:
        return cmp(a, b)
    else:
        return cmp(self[b], self[a])
if not res:
    print('COULD NOT PARSE LOGLINE:', d)
    continue
lc = res.groupdict()['log_contents']
#print(lc)
cres = c.search(lc)
if cres:
    #print cres.groupdict()
    pass
else:
    print('COULD NOT RESOLVE:', lc)
    #print 'WHOLE LINE:',d
    #raise Exception(cnt)
    continue
for k, v in list(cres.groupdict().items()):
    if v is None:
        continue
    if k not in karr:
        karr[k] = 0
    if 'print' in sys.argv:
        if 'ts' in sys.argv:
            print(res.groupdict()['date_hour'], res.groupdict()['date_minute'], k, v)
        else:
            print(k, v)
    karr[k] += 1

if 'digest' in sys.argv:
    karr_sorted = sorted(list(karr.items()), lambda x, y: cmp(x[1], y[1]))
    for k, v in karr_sorted:
        print(k, v)
print('done going through ', cnt)
def __cmp__(self, other):
    return cmp(self.prio, other.prio)
def list_versions(self, project):
    versions = self.jira.project(project).versions
    # compare left id against right id (the original compared l.id to
    # itself, which made the sort a no-op)
    versions.sort(cmp=lambda l, r: cmp(l.id, r.id))
    return [k.raw for k in versions]
def __cmp__(self, other):
    if other.__class__ is self.__class__:
        return cmp(self.value, other.value)
    return cmp(self.value, other)
def func(self):
    "Implement function using the Msg methods"

    # Since player_caller is set above, this will be a Player.
    caller = self.caller

    # get the messages we've sent (not to channels)
    pages_we_sent = Msg.objects.get_messages_by_sender(
        caller, exclude_channel_messages=True)
    # get last messages we've got
    pages_we_got = Msg.objects.get_messages_by_receiver(caller)

    if 'last' in self.switches:
        if pages_we_sent:
            recv = ",".join(obj.key for obj in pages_we_sent[-1].receivers)
            self.msg("You last paged {c%s{n:%s" % (recv, pages_we_sent[-1].message))
            return
        else:
            self.msg("You haven't paged anyone yet.")
            return

    if not self.args or not self.rhs:
        pages = pages_we_sent + pages_we_got
        pages.sort(lambda x, y: cmp(x.date_created, y.date_created))

        number = 5
        if self.args:
            try:
                number = int(self.args)
            except ValueError:
                self.msg("Usage: tell [<player> = msg]")
                return

        if len(pages) > number:
            lastpages = pages[-number:]
        else:
            lastpages = pages
        template = "{w%s{n {c%s{n to {c%s{n: %s"
        lastpages = "\n ".join(template %
                               (utils.datetime_format(page.date_created),
                                ",".join(obj.key for obj in page.senders),
                                "{n,{c ".join([obj.name for obj in page.receivers]),
                                page.message) for page in lastpages)

        if lastpages:
            string = "Your latest pages:\n %s" % lastpages
        else:
            string = "You haven't paged anyone yet."
        self.msg(string)
        return

    # We are sending. Build a list of targets
    if not self.lhs:
        # If there are no targets, then set the targets
        # to the last person we paged.
        if pages_we_sent:
            receivers = pages_we_sent[-1].receivers
        else:
            self.msg("Who do you want to page?")
            return
    else:
        receivers = self.lhslist

    recobjs = []
    for receiver in set(receivers):
        if isinstance(receiver, basestring):
            pobj = caller.search(receiver)
        elif hasattr(receiver, 'character'):
            pobj = receiver
        else:
            self.msg("Who do you want to page?")
            return
        if pobj:
            recobjs.append(pobj)
    if not recobjs:
        self.msg("No one found to page.")
        return

    header = "{wPlayer{n {c%s{n {wpages:{n" % caller.key
    message = self.rhs

    # if message begins with a :, we assume it is a 'page-pose'
    if message.startswith(":"):
        message = "%s %s" % (caller.key, message.strip(':').strip())

    # create the persistent message object
    create.create_message(caller, message, receivers=recobjs)

    # tell the players they got a message.
    received = []
    rstrings = []
    for pobj in recobjs:
        if not pobj.access(caller, 'msg'):
            rstrings.append("You are not allowed to page %s." % pobj)
            continue
        pobj.msg("%s %s" % (header, message))
        if hasattr(pobj, 'sessions') and not pobj.sessions.count():
            received.append("{C%s{n" % pobj.name)
            rstrings.append("%s is offline. They will see your message "
                            "if they list their pages later." % received[-1])
        else:
            received.append("{c%s{n" % pobj.name)
    if rstrings:
        self.msg("\n".join(rstrings))
    self.msg("You paged %s with: '%s'." % (", ".join(received), message))
def _cmp_antenna(sa, sb):
    """Helper function to sort antenna names. Try numeric compare first,
    fall back to text compare if failed"""
    try:
        return cmp(int(sa), int(sb))
    except ValueError:
        return cmp(sa, sb)
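A Python 3 sketch of the same intent as a key function. It is not byte-for-byte identical: the pairwise comparator above is not even transitive on mixed numeric/text names, whereas the key below consistently sorts all-numeric names numerically (and before text names) and the rest lexically.

def antenna_key(name):
    try:
        return (0, int(name), '')
    except ValueError:
        return (1, 0, name)

names = ['10', '2', 'ref']
assert sorted(names, key=antenna_key) == ['2', '10', 'ref']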
def __depth_cmp(item1, item2):
    d1 = item1.get("depth", 1)
    d2 = item2.get("depth", 1)
    return cmp(d1, d2)
def cmp_exec_time(task1, task2):
    return cmp(task1.execStartTimeMs, task2.execStartTimeMs)
def download_system_symbols_if_needed(symbols_directory):
    """Download system libraries from |SYMBOLS_URL| and cache locally."""
    # For local testing, we do not have access to the cloud storage bucket with
    # the symbols. In this case, just bail out.
    if environment.get_value('LOCAL_DEVELOPMENT'):
        return

    # When running reproduce tool locally, we do not have access to the cloud
    # storage bucket with the symbols. In this case, just bail out.
    if environment.get_value('REPRODUCE_TOOL'):
        return

    # We have archived symbols for google builds only.
    if not settings.is_google_device():
        return

    # Get the build fingerprint parameters.
    build_params = settings.get_build_parameters()
    if not build_params:
        logs.log_error('Unable to determine build parameters.')
        return

    build_id = build_params.get('build_id')
    target = build_params.get('target')
    type = build_params.get('type')
    if not build_id or not target or not type:
        logs.log_error('Null build parameters found, exiting.')
        return

    # Check if we already have the symbols in cache.
    build_params_check_path = os.path.join(symbols_directory, '.cached_build_params')
    cached_build_params = utils.read_data_from_file(build_params_check_path, eval_data=True)
    if cached_build_params and cmp(cached_build_params, build_params) == 0:
        # No work to do, same system symbols already in cache.
        return

    symbols_archive_filename = '%s-symbols-%s.zip' % (target, build_id)
    symbols_archive_path = os.path.join(symbols_directory, symbols_archive_filename)

    # Delete existing symbols directory first.
    shell.remove_directory(symbols_directory, recreate=True)

    # Fetch symbol file from cloud storage cache (if available).
    found_in_cache = storage.get_file_from_cache_if_exists(
        symbols_archive_path, update_modification_time_on_access=False)
    if not found_in_cache:
        # Include type and sanitizer information in the target.
        target_with_type_and_san = '%s-%s' % (target, type)
        tool_suffix = environment.get_value('SANITIZER_TOOL_NAME')
        if tool_suffix and tool_suffix not in target_with_type_and_san:
            target_with_type_and_san += '_%s' % tool_suffix

        # Fetch the artifact now.
        fetch_artifact.get(build_id, target_with_type_and_san,
                           symbols_archive_filename, symbols_directory)

    if not os.path.exists(symbols_archive_path):
        logs.log_error('Unable to locate symbols archive %s.' % symbols_archive_path)
        return

    # Store the artifact for later use or for use by other bots.
    storage.store_file_in_cache(symbols_archive_path)

    archive.unpack(symbols_archive_path, symbols_directory, trusted=True)
    shell.remove_file(symbols_archive_path)

    utils.write_data_to_file(build_params, build_params_check_path)
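Py3 note: `cmp()` on dicts is Python 2 only; in Python 3 the cache check above reduces to a plain equality test. A hypothetical helper sketching the port:

def _symbols_cached(cached_build_params, build_params):
    # Python 3 equivalent of: cmp(cached_build_params, build_params) == 0
    return bool(cached_build_params) and cached_build_params == build_params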
def __cmp__(self, other):
    assert isinstance(other, RdiffTime)
    return cmp(self.epoch(), other.epoch())
def __cmp__(self, other):
    """Compares quality of parsed items."""
    return cmp(self.quality, other.quality)
def __cmp__(self, other):
    """Compares quality of parsers; if quality is equal, compares episodes,
    then proper_count."""
    return cmp((self.quality, self.episodes, self.proper_count),
               (other.quality, other.episodes, other.proper_count))
def __cmp__(self, rhs):
    if isinstance(rhs, JobProperty):
        return cmp(self(), rhs())
    return cmp(self(), rhs)
def __cmp__(self, other):
    return cmp(self.value,
               (other.value if isinstance(other, ValueConstant) else other))
def reporting_sorter(self, a, b):
    """
    Used for sorting cobbler objects for report commands
    """
    return cmp(a.name, b.name)
def __cmp__(self, other):
    return cmp(self.ID, other.ID)
def __cmp__(self, other):
    # the original ended with an unreachable `return dmt` after this
    # return; that dead statement is dropped here
    return cmp(ALL_RANKS_ORDERED.index(self.rank),
               ALL_RANKS_ORDERED.index(other.rank))
def __cmp__(self, other):
    """ Sort Atoms first by mass, then name, then index."""
    return cmp((self.mass, self.name, self.index),
               (other.mass, other.name, other.index))
def cmp_version(ver1, ver2):
    """Compare two version strings in the form of 1.2.34"""
    return cmp([int(v) for v in ver1.split('.')],
               [int(v) for v in ver2.split('.')])
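The same ordering without `cmp()`, sketched for Python 3 both as a comparator and as the often simpler sort key:

def cmp_version_py3(ver1, ver2):
    a = [int(v) for v in ver1.split('.')]
    b = [int(v) for v in ver2.split('.')]
    return (a > b) - (a < b)  # list comparison is elementwise, like cmp()

versions = ['1.10.0', '1.2.34', '1.9.1']
assert sorted(versions, key=lambda v: [int(x) for x in v.split('.')]) == \
    ['1.2.34', '1.9.1', '1.10.0']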