Example #1
0
    def __init__(self, **names_vals):
        """Validate the given parameters and set them as instance attributes.

        Each value is converted with the converter registered under its
        name in the class-level ``params`` mapping; unknown parameters are
        logged and skipped.  After all attributes are set, every
        ``is_valid_*`` method of the class is run (in name order) and a
        ``ValueError`` is raised on the first failing check.

        Raises:
            NameError: if a parameter name starts with '_' or 'is_valid_'.
            ValueError: if a value cannot be converted or a check fails.
        """
        for name, val in names_vals.items():  # .items() works on Py2 and Py3
            # Reserved prefixes would shadow internals or the checks below.
            if name.startswith(('_', 'is_valid_')):
                raise NameError('The parameter name %s is not acceptable'
                                % name)
            try:
                convert = self.__class__.params[name]
            except KeyError:
                # logging.warn is a deprecated alias; use warning + lazy args
                logging.warning('The parameter %r is unknown, ignoring', name)
                continue
            try:
                value = convert(val)
            except Exception:
                # was a bare except: do not swallow KeyboardInterrupt et al.
                raise ValueError('Could not convert to %s: %s=%s'
                                 % (convert.__name__, name, val))
            setattr(self, name, value)

        # dir() is documented to return a sorted list, so iterating it gives
        # a deterministic order by method name.  (The original sorted the
        # bound methods themselves, which is a TypeError on Python 3.)
        valids = [getattr(self, valid)
                  for valid in sorted(dir(self.__class__))
                  if valid.startswith('is_valid_')]
        for is_valid in valids:
            if not is_valid():
                dump = '\n'.join('%s=%s' % (n, v)
                                 for n, v in sorted(self.__dict__.items()))
                raise ValueError(is_valid.__doc__ + 'Got:\n' + dump)
Example #2
0
 def test_create_defaults(self):
     """A freshly created user exposes repr, state and public_state correctly."""
     u = User("test_user")
     u.state_from_dict({"key": "1", "key2": "2"})
     u.state_public_keys = ["key2"]
     expected_state = {"key": "1", "key2": "2"}
     assert repr(u) == "<User:test_user, connections:0>"
     assert sorted(u.state.items()) == sorted(expected_state.items())
     assert u.public_state == {"key2": "2"}
Example #3
0
def knapsack_unbounded_dp(items, C):
    """Solve the unbounded knapsack problem by dynamic programming.

    Args:
        items: sequence of (name, size, value) tuples, indexed via the
            module-level NAME/SIZE/VALUE constants.
        C: total capacity of the sack (non-negative int).

    Returns:
        (value, size, numbagged, bagged) where ``bagged`` is a name-sorted
        list of (name, count) pairs for the items actually used.
    """
    # Order by max value per item size (not required for correctness,
    # but fills the sack with dense candidates first).
    items = sorted(items, key=lambda item: item[VALUE] / float(item[SIZE]),
                   reverse=True)

    # sack[c] = best (value, [count per item]) achievable at capacity c.
    sack = [(0, [0 for i in items]) for i in range(0, C + 1)]

    for i, item in enumerate(items):
        name, size, value = item
        for c in range(size, C + 1):
            sackwithout = sack[c - size]  # best sack this item could extend
            trial = sackwithout[0] + value
            if sack[c][0] < trial:
                # The smaller sack plus one more of this item is better.
                sack[c] = (trial, sackwithout[1][:])
                sack[c][1][i] += 1

    value, bagged = sack[C]
    numbagged = sum(bagged)
    size = sum(items[i][1] * n for i, n in enumerate(bagged))
    # Convert to (item, count) pairs in name order.
    bagged = sorted((items[i][NAME], n) for i, n in enumerate(bagged) if n)

    return value, size, numbagged, bagged
Example #4
0
def calculateSparseDictCOO(data_set, data_label_hash, jump=1, valid_flag=False):
	"""Build COO-style (rows, cols, values, labels) components for train/valid.

	NOTE(review): this relies on Python 2 semantics -- ``filter``/``map``
	must return lists (``base_ids_list`` is indexed below), and
	``validation_perc`` / ``translate`` are module globals defined
	elsewhere.  Confirm before porting to Python 3.

	Args:
		data_set: iterable of rows shaped (doc_id, col, value, ...).
		data_label_hash: mapping doc_id -> label.
		jump: keep only doc ids divisible by ``jump``.
		valid_flag: when True, carve a validation subset out of the ids.

	Returns:
		(train, valid), each a (translate(rows), cols, values, labels) tuple.
	"""
	row = []
	col = []
	data = []
	row_valid = []
	col_valid = []
	data_valid = []

	# Unique document ids (the inner sorted() is redundant inside set()).
	doc_ids = set(sorted(map(lambda row:int(row[0]), data_set)))
	base_ids_list = filter(lambda ids: ids % jump == 0, doc_ids)
	train_ids = base_ids_list
	valid_ids = set()
	if valid_flag:
		# Every validation_perc-th *position* in base_ids_list goes to validation.
		valid_index = filter(lambda ids: ids % validation_perc == 0, range(len(base_ids_list)))
		valid_ids = [base_ids_list[i] for i in valid_index]
		base_ids = set(base_ids_list)
		train_ids = sorted(base_ids - set(valid_ids))

	labels = map(lambda trid: int(data_label_hash[trid]), train_ids)
	labels_valid = map(lambda vlid: int(data_label_hash[vlid]), valid_ids)
	# Route each row into the train or validation triplet lists.
	# Column indices are stored 1-based in data_set, hence the -1.
	for i in range(len(data_set)):
		if int(data_set[i][0]) in train_ids:
			row.append(int(data_set[i][0]))
			col.append(int(data_set[i][1])-1)
			data.append(int(data_set[i][2]))
			# labels.append(int(data_label_hash[int(data_set[i][0])]))
		elif int(data_set[i][0]) in valid_ids:
			row_valid.append(int(data_set[i][0]))
			col_valid.append(int(data_set[i][1])-1)
			data_valid.append(int(data_set[i][2]))
			# labels_valid.append(int(data_label_hash[int(data_set[i][0])]))

	# NOTE(review): translate() is applied to the row ids only -- verify
	# that cols/data intentionally stay untranslated.
	train = translate(row), col, data, labels
	valid = translate(row_valid), col_valid, data_valid, labels_valid
	return train, valid
Example #5
0
    def _fill_usm3d_case(self, cases, bcs, mapbc, bcmap_to_bc_name, loads):
        """Populate the ``cases`` result dict for a USM3D model.

        Adds a centroidal 'Region' case (when ``bcs`` is given) and one
        nodal case per load, turning the scalar bar back on whenever a
        result was actually added.

        NOTE(review): uses dict.iteritems(), so this is Python 2 only.

        :param cases: dict of results keyed by (ID, name, ncomp, location, fmt)
        :param bcs: per-element boundary-condition array, or None
        :param mapbc: {region: bc_number}
        :param bcmap_to_bc_name: {bc_number: readable name}
        :param loads: {load name: nodal array}
        :returns: the updated ``cases`` dict
        """
        self.scalarBar.VisibilityOff()

        ID = 1
        if bcs is not None and self.is_centroidal:
            cases[(ID, 'Region', 1, 'centroid', '%.0f')] = bcs

            # Group regions by BC number for the summary log below.
            mapbc_print = defaultdict(list)
            for region, bcnum in sorted(mapbc.iteritems()):
                mapbc_print[bcnum].append(region)
                # name is only needed for the commented-out per-region log.
                try:
                    name = bcmap_to_bc_name[bcnum]
                except KeyError:
                    name = '???'
                #self.log.info('Region=%i BC=%s name=%r' % (region, bcnum, name))

            for bcnum, regions in sorted(mapbc_print.iteritems()):
                try:
                    name = bcmap_to_bc_name[bcnum]
                except KeyError:
                    name = '???'
                self.log.info('BC=%s Regions=%s name=%r' % (bcnum, regions, name))
            self.scalarBar.VisibilityOn()

        #==============================
        ID = 2
        # Nodal results: one case per load vector.
        if self.is_nodal and len(loads):
            for key, load in loads.iteritems():
                cases[(ID, key, 1, 'nodal', '%.3f')] = load
            self.scalarBar.VisibilityOn()
        return cases
Example #6
0
    def assertServiceMetadata(self, meta_keys, count=None, at_least=1):
        """Assert that service metadata entries with exactly ``meta_keys`` exist.

        Delegates the cardinality check to ``_candidates_size_assert`` and
        marks every matching entry with ``tested = True`` afterwards.
        """
        log.debug("Looking for service metadata with keys {0}".format(meta_keys))
        if count is not None:
            log.debug(" * should be defined for exactly {0} instances".format(count))
        elif at_least is not None:
            log.debug(" * should be defined for at least {0} instances".format(at_least))

        wanted_keys = sorted(meta_keys)
        candidates = [sm for sm in self.service_metadata
                      if sorted(sm.keys()) == wanted_keys]

        try:
            self._candidates_size_assert(candidates, count=count, at_least=at_least)
        except AssertionError:
            log.error("Candidates size assertion for service metadata with keys {0}"
                      " (count: {1}, at_least: {2}) failed".format(meta_keys, count, at_least))
            raise

        # Flag every metadata entry equal to one of the candidates as tested.
        for sm in self.service_metadata:
            if any(sm == csm for csm in candidates):
                sm['tested'] = True
        log.debug("Service metadata FOUND !")
    def distractors(self,count):
        """Build plausible-but-wrong (interval, feedback) pairs for this problem.

        Each candidate interval is paired with LaTeX feedback explaining
        the mistake; intervals that actually contain the true value are
        discarded, then exact duplicates are removed.  Relies on Sage
        globals (``n``, ``var``, ``oo``, ``RR``, ``QQ``).

        NOTE(review): ``count`` is unused, and ``nearby`` is recomputed
        before the last distractor without being used by it -- confirm
        whether that entry was meant to use ``nearby``.
        """
        results = []

        # Swapped roles of f(initial) and the integral.
        results.append(([self.initial_value, self.initial_value + self.integral_value], r'You have exchanged the roles of \(f(' + str(self.initial) + ')\) and the integral.'))

        # Subtracted f(initial) instead of adding it.
        results.append(([self.integral_value - self.initial_value, self.integral_value], r'You are subtracting \(f(' + str(self.initial) + ')\) when you should be adding it.'))

        results.append((sorted([self.integral_value, self.initial_value]), r'You should add \(f(' + str(self.initial) + ')\) to the value of the integral.'))

        # Numeric value of the true series sum.
        true_value = n(sum(self.term_x, var('x'), self.initial, oo))

        # A rational close to the true value, to make the interval tempting.
        nearby = RR(true_value).nearby_rational(max_denominator=2 * QQ(self.integral_value).denominator())
        results.append((sorted([self.integral_value, nearby]), r'You should add \(f(' + str(self.initial) + ')\) to the value of the integral.'))

        nearby = RR(true_value).nearby_rational(max_denominator=2 * QQ(self.integral_value).denominator())
        results.append((sorted([self.integral_value + self.initial_value, self.integral_value + self.initial_value*2]), r'You should add \(f(' + str(self.initial) + ')\) to the value of the integral.'))

        # Drop any interval that actually contains the true value.
        results = [r for r in results if not ((r[0][0] <= true_value) and (true_value <= r[0][1]))]

        # De-duplicate by interval, keeping first occurrence.
        real_results = []
        for r in results:
            if not any([x[0] == r[0] for x in real_results]):
                real_results.append(r)

        return real_results
Example #8
0
File: run.py Project: dbrgn/jedi
def run_related_name_test(script, correct, line_nr):
    """
    Runs tests for gotos.
    Tests look like this:
    >>> abc = 1
    >>> #< abc@1,0 abc@3,0
    >>> abc

    Return if the test was a fail or not, with 1 for fail and 0 for success.
    """
    result = script.related_names()
    correct = correct.strip()
    compare = sorted((r.module_name, r.start_pos[0], r.start_pos[1])
                     for r in result)
    # An empty spec means "no related names expected".
    positions = literal_eval(correct) if correct else []
    wanted = []
    for pos_tup in positions:
        # isinstance, not type(...) == str: also accepts str subclasses.
        if isinstance(pos_tup[0], str):
            # this means that there is a module specified
            wanted.append(pos_tup)
        else:
            # Bare (line, column) offsets are relative to the test line.
            wanted.append(('renaming', line_nr + pos_tup[0], pos_tup[1]))

    wanted = sorted(wanted)
    if compare != wanted:
        print('Solution @%s not right, received %s, wanted %s'
              % (line_nr - 1, compare, wanted))
        return 1
    return 0
  def testIndexCreate(self):
    """The collection index is built lazily and persisted across reopens."""
    with aff4.FACTORY.Create("aff4:/sequential_collection/testIndexCreate",
                             TestIndexedSequentialCollection,
                             token=self.token) as collection:
      for i in range(10 * 1024):
        collection.Add(rdfvalue.RDFInteger(i))

      # It is too soon to build an index, check that we don't.
      self.assertEqual(collection._index, None)
      self.assertEqual(collection.CalculateLength(), 10 * 1024)
      # NOTE(review): CalculateLength appears to create the initial
      # index entry [0] as a side effect -- confirm against the class.
      self.assertEqual(sorted(collection._index.keys()), [0])

      # Push the clock forward 10m, and we should build an index on access.
      with test_lib.FakeTime(rdfvalue.RDFDatetime().Now() + rdfvalue.Duration(
          "10m")):
        # Read from start doesn't rebuild index (lazy rebuild)
        _ = collection[0]
        self.assertEqual(sorted(collection._index.keys()), [0])

        # Recomputing the length after the delay builds the full index.
        self.assertEqual(collection.CalculateLength(), 10 * 1024)
        self.assertEqual(
            sorted(collection._index.keys()), [0, 1024, 2048, 3072, 4096, 5120,
                                               6144, 7168, 8192, 9216])

    # Now check that the index was persisted to aff4 by re-opening and checking
    # that a read from head does load full index (optimistic load):

    with aff4.FACTORY.Create("aff4:/sequential_collection/testIndexCreate",
                             TestIndexedSequentialCollection,
                             token=self.token) as collection:
      self.assertEqual(collection._index, None)
      _ = collection[0]
      self.assertEqual(
          sorted(collection._index.keys()), [0, 1024, 2048, 3072, 4096, 5120,
                                             6144, 7168, 8192, 9216])
Example #10
0
def expand_view(request):
  """View for expanding a pattern into matching metric paths"""
  local_only = int(request.REQUEST.get('local', 0))
  group_by_expr = int(request.REQUEST.get('groupByExpr', 0))
  leaves_only = int(request.REQUEST.get('leavesOnly', 0))
  jsonp = request.REQUEST.get('jsonp', False)

  # Collect the matching paths for each requested pattern.
  results = {}
  for query in request.REQUEST.getlist('query'):
    matches = set()
    for node in STORE.find(query, local=local_only):
      if node.is_leaf or not leaves_only:
        matches.add(node.path)
    results[query] = matches

  # Sets are not JSON-serializable, so emit sorted lists instead:
  # either per-expression, or one flat merged list.
  if group_by_expr:
    for query in results:
      results[query] = sorted(results[query])
  else:
    results = sorted(reduce(set.union, results.values(), set()))

  result = {'results': results}

  response = json_response_for(request, result, jsonp=jsonp)
  response['Pragma'] = 'no-cache'
  response['Cache-Control'] = 'no-cache'
  return response
    def test_select_competitive_companies(self):
        """End-to-end check of select_competitive_companies().

        Two industries, three published companies (company 3 in the
        second industry), industries linked as competitors with weight
        .7; the competitor list of company 1 is verified order-insensitively.

        NOTE(review): sorting lists of dicts works on Python 2 only --
        Python 3 raises TypeError for dict < dict.
        """

        # create two industries
        industry_id1 = ensure_id(insert_test_industry())
        industry_id2 = ensure_id(insert_test_industry())

        # create three companies
        company_id1 = ensure_id(insert_test_company(workflow_status = "published"))
        company_id2 = ensure_id(insert_test_company(workflow_status = "published"))
        company_id3 = ensure_id(insert_test_company(workflow_status = "published"))

        # add primary industries to all three companies.  Company 3 gets a different industry.
        self.main_access.mds.call_add_link("company", company_id1, 'primary_industry_classification', 'industry', industry_id1, "primary_industry", "industry_classification", self.context)
        self.main_access.mds.call_add_link("company", company_id2, 'primary_industry_classification', 'industry', industry_id1, "primary_industry", "industry_classification", self.context)
        self.main_access.mds.call_add_link("company", company_id3, 'primary_industry_classification', 'industry', industry_id2, "primary_industry", "industry_classification", self.context)

        # make industries 1 and 2 compete with each other
        link_interval = [datetime.datetime(2012, 1, 1), datetime.datetime(2013, 2, 2)]
        link_data = {"home_to_away": {"weight": .7}, "away_to_home": {"weight": .7}}
        self.main_access.mds.call_add_link("industry", industry_id1, 'competitor', 'industry', industry_id2, "competitor", "industry_competition", self.context, link_interval = link_interval,
                                           link_data = link_data)

        # query the competitions of company
        competitive_companies = select_competitive_companies(company_id1)

        # sort both the expected and real array so that the order doesn't matter
        expected_competitive_companies = sorted([
            { "_id": str(company_id1), "interval": None, "competition_strength": 1 },
            { "_id": str(company_id2), "interval": None, "competition_strength": 1 },
            { "_id": str(company_id3), "interval": [datetime.datetime(2012, 1, 1), datetime.datetime(2013, 2, 2)], "competition_strength": .7 }
        ])
        competitive_companies = sorted(competitive_companies)

        # make sure the competitions are correct
        self.test_case.assertEqual(competitive_companies, expected_competitive_companies)
Example #12
0
def test_add_group(app, db, json_groups):
    """Creating a group grows the DB group list by exactly that group."""
    group = json_groups
    groups_before = db.get_group_list()
    app.group.create(group)
    groups_after = db.get_group_list()
    groups_before.append(group)
    # Compare order-insensitively, sorting by id (max for missing ids).
    assert sorted(groups_before, key=group.id_or_max) == sorted(groups_after, key=group.id_or_max)
Example #13
0
def test_onset_functions():
    """Unit and regression tests for mir_eval.onset (nose test generator)."""
    # Load all fixture files in the same (sorted) order.
    ref_files = sorted(glob.glob(REF_GLOB))
    est_files = sorted(glob.glob(EST_GLOB))
    sco_files = sorted(glob.glob(SCORES_GLOB))

    assert len(ref_files) == len(est_files) == len(sco_files) > 0

    # Unit tests
    for metric in [mir_eval.onset.f_measure]:
        yield (__unit_test_onset_function, metric)

    # Regression tests against the stored scores.
    for ref_f, est_f, sco_f in zip(ref_files, est_files, sco_files):
        with open(sco_f, 'r') as fhandle:
            expected_scores = json.load(fhandle)
        # Example onset annotation and tracker output.
        reference_onsets = mir_eval.io.load_events(ref_f)
        estimated_onsets = mir_eval.io.load_events(est_f)
        scores = mir_eval.onset.evaluate(reference_onsets, estimated_onsets)
        # Yield one check per metric so nose reports each individually.
        for metric in scores:
            yield (__check_score, sco_f, metric, scores[metric],
                   expected_scores[metric])
Example #14
0
    def testFull(self, num_best=None, shardsize=100):
        """Check index contents and query results against known values.

        For MatrixSimilarity the raw index matrix is compared (flattened
        and sorted, since dictionary order may vary); for every index
        class the similarities of the first corpus document are checked.

        :param num_best: truncate results to the top-n hits (None = all)
        :param shardsize: shard size, used only for similarities.Similarity
        """
        if self.cls == similarities.Similarity:
            index = self.cls(None, corpus, num_features=len(dictionary), shardsize=shardsize)
        else:
            index = self.cls(corpus, num_features=len(dictionary))
        if isinstance(index, similarities.MatrixSimilarity):
            expected = numpy.array([
                [0.57735026, 0.57735026, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.40824831, 0.0, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.0, 0.0, 0.0, 0.0],
                [0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.40824831, 0.0, 0.0, 0.0, 0.81649661, 0.0, 0.40824831, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.57735026, 0.57735026, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1., 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.70710677, 0.70710677, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026, 0.57735026],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026],
                ], dtype=numpy.float32)
            # HACK: dictionary can be in different order, so compare in sorted order
            self.assertTrue(numpy.allclose(sorted(expected.flat), sorted(index.index.flat)))
        index.num_best = num_best
        query = corpus[0]
        sims = index[query]
        # Expected (doc id, cosine similarity) hits for the first document.
        expected = [(0, 0.99999994), (2, 0.28867513), (3, 0.23570226), (1, 0.23570226)][ : num_best]

        # convert sims to full numpy arrays, so we can use allclose() and ignore
        # ordering of items with the same similarity value
        expected = matutils.sparse2full(expected, len(index))
        if num_best is not None: # when num_best is None, sims is already a numpy array
            sims = matutils.sparse2full(sims, len(index))
        self.assertTrue(numpy.allclose(expected, sims))
        if self.cls == similarities.Similarity:
            index.destroy()
Example #15
0
def extendedReportOn(fileName):
    """Write an extended report for the dossiers listed in ``fileName``.

    ``fileName`` contains one dossier number per line.  Every dossier in
    the module-level ``allNumbers`` mapping whose fixed ('XX' -> 'EP')
    number appears in that filter is written, ordered by
    ``sortingFunctionOfExtendedReport``, to processing/filter.txt under
    ``baseDir``.
    """
    # Read the filter numbers; 'with' guarantees the handle is closed
    # (the original leaked both handles on error and shadowed builtin 'file').
    with open(fileName) as filter_file:
        filter_set = set(line.strip() for line in filter_file)

    result = []
    # allNumbers maps keys -> dossier; iterate its keys in report order.
    for key in sorted(allNumbers, key=sortingFunctionOfAllNumbers):
        dossier = allNumbers[key]
        # Dossier numbers are stored with an 'XX' placeholder prefix.
        fixed_number = dossier.number.replace('XX', 'EP', 1)
        if fixed_number in filter_set:
            result.append(dossier)

    with open(os.path.join(baseDir, 'processing/filter.txt'), 'w') as out_file:
        for dossier in sorted(result, key=sortingFunctionOfExtendedReport):
            out_file.write(dossier.toString() + '\n')
Example #16
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('taxonomy', help='name of taxonomy to download (%s)' % 
                        ', '.join(sorted(taxonomies.keys())))
    parser.add_argument('-o', '--output', help='path to save tree output', 
                        nargs='?', default=None)
    parser.add_argument('-f', '--format', help='tree format (%s)' %
                        ', '.join(sorted(bp._io.supported_formats.keys())),
                        nargs='?', default='newick')
    parser.add_argument('-i', '--id', help='Use NCBI ids as label',
                    action='store_true')

    args = parser.parse_args()

    if args.taxonomy == 'ALL':
        classes = [x for x in taxonomies.values() if not x is None]
        args.filename = None
    else:
        classes = [taxonomies[args.taxonomy]]
        
    for c in classes:
        taxonomy = c()
        print '** %s **' % taxonomy.name
        filename = ((args.filename if hasattr(args, 'filename') else None) 
                    or ('%s_taxonomy.%s' % (taxonomy.name, args.format)))
        taxonomy.main(filename, tree_format=args.format, ids=args.id)
Example #17
0
    def assertMetric(self, metric_name, value=None, tags=None, count=None,
                     at_least=1, hostname=None, device_name=None, metric_type=None):
        """Assert that a metric matching all given constraints was emitted.

        Each non-None keyword narrows the match; the cardinality check is
        delegated to ``_candidates_size_assert`` and every matching metric
        is marked as tested afterwards.
        """
        def _matches(val, mdata):
            # Every constraint that was supplied must hold.
            if value is not None and val != value:
                return False
            if tags is not None and sorted(tags) != sorted(mdata.get("tags", [])):
                return False
            if hostname is not None and mdata['hostname'] != hostname:
                return False
            if device_name is not None and mdata['device_name'] != device_name:
                return False
            if metric_type is not None and mdata['type'] != metric_type:
                return False
            return True

        candidates = [(m_name, ts, val, mdata)
                      for m_name, ts, val, mdata in self.metrics
                      if m_name == metric_name and _matches(val, mdata)]

        try:
            self._candidates_size_assert(candidates, count=count, at_least=at_least)
        except AssertionError:
            log.error("Candidates size assertion for {0} (value: {1}, tags: {2}, "
                      "count: {3}, at_least: {4}, hostname: {5}) failed"
                      .format(metric_name, value, tags, count, at_least, hostname))
            raise

        # Flag every metric tuple equal to one of the candidates as tested.
        for mtuple in self.metrics:
            if any(mtuple == cmtuple for cmtuple in candidates):
                mtuple[3]['tested'] = True
        log.debug("{0} FOUND !".format(metric_name))
Example #18
0
def cleanupFiles():
    """Reset the ArcGIS scratch area and stage a fresh workspace.

    Deletes layers l1-l3, wipes C:\\Arctmp, picks the first unused
    numbered workspace directory, copies C:\\Arcbase into it and lists
    its shapefiles, layer files and geodatabases.

    NOTE(review): Python 2 only (print statement, xrange) and hard-coded
    Windows paths.
    """
    # First get rid of modified files
    for l in ["l1", "l2", "l3"]:
        arcpy.Delete_management(l)

    # Best-effort wipe: directories may be locked by ArcGIS processes.
    for f in glob.glob("C:\\Arctmp\\*"):
        try:
            shutil.rmtree(f)
        except:
            print "UNABLE TO REMOVE:", f
    # Now remove the old directory
    for i in xrange(0, 1000000):
        new_workspace = "C:\\Arctmp\\workspace." + str(i)
        if not os.path.exists(new_workspace):
            break
    print "TESTING USING WORKSPACE", new_workspace
    # Now move in fresh copies
    shutil.copytree("C:\\Arcbase", new_workspace)
    print "CONTENTS:"
    arcpy.env.workspace = new_workspace
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.shp")):
        print f
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.lyr")):
        print f
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.gdb")):
        print f
Example #19
0
def same_keys(a, b):
    """Return True when *a* and *b* have the same .dask keys (order-insensitive)."""
    def _order(k):
        # Pad string keys into 4-tuples so they compare against tuple keys.
        return (k, -1, -1, -1) if isinstance(k, str) else k
    return sorted(a.dask, key=_order) == sorted(b.dask, key=_order)
Example #20
0
 def testMove(self):
   """An svn move shows up as copy+delete and preserves file ancestry."""
   co = self._get_co(None)
   self._check_move(co)
   # The move must appear as an add-with-history plus a delete.
   out = subprocess2.check_output(
       ['svn', 'status'], cwd=co.project_path)
   out = sorted(out.splitlines())
   expected = sorted(
     [
       'A  +    chromeos/views/webui_menu_widget.h',
       'D       chromeos/views/DOMui_menu_widget.h',
     ])
   self.assertEquals(expected, out)
   # Make sure ancestry is what is expected;
   # Force English svn output so the 'key: value' parsing below works.
   env = os.environ.copy()
   env['LANGUAGE'] = 'en_US.UTF-8'
   out = subprocess2.check_output(
       ['svn', 'info', 'chromeos/views/webui_menu_widget.h'],
       cwd=co.project_path,
       env=env)
   values = dict(l.split(': ', 1) for l in out.splitlines() if l)
   expected = {
     'Checksum': '65837bb3da662c8fa88a4a50940ea7c6',
     'Copied From Rev': '2',
     'Copied From URL':
         '%strunk/chromeos/views/DOMui_menu_widget.h' % self.svn_base,
     'Name': 'webui_menu_widget.h',
     'Node Kind': 'file',
     'Path': 'chromeos/views/webui_menu_widget.h',
     'Repository Root': '%s' % self.svn_base.rstrip('/'),
     'Revision': '2',
     'Schedule': 'add',
     'URL': '%strunk/chromeos/views/webui_menu_widget.h' % self.svn_base,
   }
   self.assertEquals(expected, values)
Example #21
0
 def isAnagram(self, s, t):
     """
     :type s: str
     :type t: str
     :rtype: bool
     """
     # Two strings are anagrams iff their sorted character sequences match.
     return sorted(s) == sorted(t)
Example #22
0
def getPythonFunc(text):
	"""Scrape docs.python.org for the callables documented for module ``text``.

	Returns (methods, others, module_url): ``methods`` are lowercase
	names ending in ')', ``others`` is everything else, both as
	'; '-joined sorted unique strings.  Returns None (implicitly) when
	the page cannot be fetched or parsed.

	NOTE(review): the bare ``except: return`` clauses swallow every
	error, and ``value.index(x)`` always finds the *first* equal entry,
	so duplicates after cleanup may not all be rewritten -- confirm.
	"""
	module_url = "https://docs.python.org/3/library/%s.html" % text

	try:
		class_link = str(urlopen(module_url).read())
	except: return

	try:
		# Undo the most common HTML entities / escape sequences in the page.
		class_link = class_link.replace("\\n", " ").replace("\\t", "").replace("\\r", "").replace("&nbsp;", " ").replace("\'", "'").replace("&quot;", "\"").replace("\\;", "").replace("\\'", "'").replace("&lt;", "<").replace("&gt;", ">").replace("&trade;", "(TM)").replace("&#8212;", "--").replace("&#8220;", "\"").replace("&#8221;", "\"").replace("\n", " ").replace("\t", "").replace("\r", "").replace("&#8217;", "'")

		# Names are rendered as <code class="descname">...</code> after the module prefix.
		value = (re.findall(r'[\w\_]+\.</code><code class="descname">(.*?)<a class=' , class_link))


		# Strip leftover escapes/tags, then truncate each name after its ')'.
		pattern = re.compile(r'\\x[\w\d]\d|<.*?>')
		# description = pattern.sub(" ", "".join(result))
		for x in value:
			y = pattern.sub("", x).replace(")", ")#").split("#")[0]
			value[value.index(x)] = y

		# print(value)
		main = []; others = [];

		# Callables (lowercase, ending in ')') vs everything else.
		for x in value:
			if x[0].islower() and x.endswith(")"):
				main.append(x)
			else:
				others.append(x)

		meth_main = "; ".join(sorted(set(main)))
		other_main = "; ".join(sorted(set(others)))

		return meth_main, other_main, module_url
	except : return
Example #23
0
  def Get(self, user, domain=None):  # pylint: disable=g-bad-name
    """Produces the map listing page."""
    if domain:
      title = 'Maps for %s' % domain
    else:
      title = 'Maps for all domains'

    # Fetch one item beyond the page size so we know if a next page exists.
    skip = int(self.request.get('skip', '0'))
    viewable = model.Map.GetViewable(user, domain)
    maps = list(itertools.islice(viewable, skip, skip + ITEMS_PER_PAGE + 1))
    more_items = len(maps) > ITEMS_PER_PAGE
    maps = maps[:ITEMS_PER_PAGE]

    # Group catalog entries by the map they publish, then attach each
    # map's entries as a 'catalog_entries' attribute.
    published = {}
    for entry in model.CatalogEntry.GetAll():
      published.setdefault(entry.map_id, []).append(entry)
    for m in maps:
      m.catalog_entries = sorted(
          published.get(m.id, []), key=lambda e: (e.domain, e.label))

    self.response.out.write(self.RenderTemplate('map_list.html', {
        'title': title,
        'maps': maps,
        'first': skip + 1,
        'last': skip + len(maps),
        'more_items': more_items,
        'prev_page_url':
            self.request.path_url + '?skip=%d' % max(0, skip - ITEMS_PER_PAGE),
        'next_page_url':
            self.request.path_url + '?skip=%d' % (skip + ITEMS_PER_PAGE),
        'catalog_domains': sorted(
            perms.GetAccessibleDomains(user, perms.Role.CATALOG_EDITOR))
    }))
Example #24
0
            def insert(edit):
                """Append a report of all linter errors to the output view.

                ``linters`` and ``output`` are captured from the enclosing
                scope; nothing is inserted when no linter found errors.
                """
                if not any(l.errors for l in linters):
                    return

                filename = os.path.basename(linters[0].filename or 'untitled')
                out = '\n{}:\n'.format(filename)

                for lint in sorted(linters, key=lambda lint: lint.name):
                    if lint.errors:
                        out += '\n  {}:\n'.format(lint.name)
                        items = sorted(lint.errors.items())

                        # Pad to the widest *displayed* line number.  The
                        # original sized the field from the 0-based index,
                        # misaligning e.g. displayed line 10 (index 9).
                        width = len(str(items[-1][0] + 1))

                        for line, messages in items:
                            for col, message in messages:
                                out += '    {:>{width}}: {}\n'.format(line + 1, message, width=width)

                output.insert(edit, output.size(), out)
Example #25
0
    def print_ratings(self, names, channel, game_type):
        """Reply to ``channel`` with per-player Elo ratings for both teams.

        If some ratings are not cached yet, a fetch is started and this
        call is queued on ``balance.pending`` to be retried when data
        arrives.  Returns False while waiting, True once printed.
        """
        balance = self.plugins["balance"]

        not_cached = balance.not_cached(game_type, names)
        if not_cached:
            with balance.rlock:
                # Drop names that an in-flight lookup is already fetching.
                for lookup in balance.lookups:
                    for n in balance.lookups[lookup][1]:
                        if n in not_cached:
                            not_cached.remove(n)
                if not_cached:
                    balance.fetch_player_ratings(not_cached, channel, game_type)
                # Queue this exact call (idempotently) to run again later.
                if (self.print_ratings, (names, channel, game_type)) not in balance.pending:
                    balance.pending.append((self.print_ratings, (names, channel, game_type)))
                return False

        # Everything is cached: print both teams sorted by descending Elo.
        teams = self.teams()
        red_sorted = sorted(teams["red"], key=lambda x: balance.cache[x.clean_name.lower()][game_type]["elo"], reverse=True)
        blue_sorted = sorted(teams["blue"], key=lambda x: balance.cache[x.clean_name.lower()][game_type]["elo"], reverse=True)
        red = "^7" + ", ".join(["{}: ^1{}^7".format(p, balance.cache[p.clean_name.lower()][game_type]["elo"]) for p in red_sorted])
        blue = "^7" + ", ".join(["{}: ^4{}^7".format(p, balance.cache[p.clean_name.lower()][game_type]["elo"]) for p in blue_sorted])

        channel.reply(red)
        channel.reply(blue)
        return True
Example #26
0
def tell(path):
    """Return a JSON listing of directory *path* relative to the juno root.

    Directories come first, then visible (non-dot) files; each entry
    carries its subpath id and the caller's write permissions.
    """
    entries = []

    base = os.path.join(config["juno-base"], "root")
    path = os.path.join(base, safepath(path))

    # Only the first level of the walk is needed.  next() replaces the
    # Python-2-only generator method .next() and works on both versions.
    dirpath, dirnames, filenames = next(os.walk(path))
    for name in sorted(dirnames):
        subpath = "/" + os.path.relpath(os.path.join(path, name), base)
        entries.append({ "text": name,
                         "id": subpath,
                         "can_write_parent": auth.can_write_parent(subpath[1:]),
                         "can_write": auth.can_write(subpath[1:]) })
    for name in sorted(filenames):
        # Hidden files are skipped (directories are not filtered this way).
        if name[0] == ".": continue

        subpath = "/" + os.path.relpath(os.path.join(path, name), base)
        entries.append({ "text": name,
                         "id": subpath,
                         "can_write_parent": auth.can_write_parent(subpath[1:]),
                         "can_write": auth.can_write(subpath[1:]),
                         "leaf": "true" })

    return json.dumps(entries)
Example #27
0
def combine(**kwargs):
  """Generate combinations based on its keyword arguments.

  Two sets of returned combinations can be concatenated using +.  Their product
  can be computed using `times()`.

  Args:
    **kwargs: keyword arguments of form `option=[possibilities, ...]`
         or `option=the_only_possibility`.

  Returns:
    a list of dictionaries for each combination. Keys in the dictionaries are
    the keyword argument names.  Each key has one value - one of the
    corresponding keyword argument values.
  """
  if not kwargs:
    return [OrderedDict()]

  # Keys are kept sorted by this key function throughout the recursion.
  sort_by_key = lambda k: k[0][0]
  ordered = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
  pairs = list(ordered.items())

  # Peel off the first option; recurse on the remaining ones.
  key, values = pairs[0]
  remaining = dict(pairs[1:])
  if not isinstance(values, list):
    values = [values]
  tails = combine(**remaining)

  return [
      OrderedDict(sorted(list(tail.items()) + [(key, v)], key=sort_by_key))
      for v in values
      for tail in tails
  ]
def canonical_string(req):
    """
    Canonicalize a request to a token that can be signed.
    """
    # Method, content hash and content type form the fixed prefix.
    buf = "%s\n%s\n%s\n" % (req.method, req.headers.get('Content-MD5', ''),
                            req.headers.get('Content-Type') or '')

    # Gather the x-amz-* headers, keyed by their lowercased names.
    amz_headers = {}
    for amz_header in sorted(key.lower() for key in req.headers
                             if key.lower().startswith('x-amz-')):
        amz_headers[amz_header] = req.headers[amz_header]

    # x-amz-date supersedes the Date header in the signed string.
    if 'x-amz-date' in amz_headers:
        buf += "\n"
    elif 'Date' in req.headers:
        buf += "%s\n" % req.headers['Date']

    for name in sorted(key.lower() for key in amz_headers):
        buf += "%s:%s\n" % (name, amz_headers[name])

    # RAW_PATH_INFO is enabled in later version than eventlet 0.9.17.
    # When using older version, swift3 uses req.path of swob instead
    # of it.
    path = req.environ.get('RAW_PATH_INFO', req.path)
    if req.query_string:
        path += '?' + req.query_string
    if '?' in path:
        path, args = path.split('?', 1)
        # Sub-resource query keys take part in the canonical string.
        for key in urlparse.parse_qs(args, keep_blank_values=True):
            if key in ('acl', 'logging', 'torrent', 'location',
                       'requestPayment', 'versioning', 'delete'):
                return "%s%s?%s" % (buf, path, key)
    return buf + path
Example #29
0
def test_format(obj, precision=6):
    tf = lambda o: test_format(o, precision)
    delimit = lambda o: ', '.join(o)
    otype = type(obj)
    if otype is str:
        return "'%s'" % obj
    elif otype is float or otype is int:
        if otype is int:
            obj = float(obj)
        fstr = '%%.%df' % precision
        return fstr % obj
    elif otype is set:
        if len(obj) == 0:
            return 'set()'
        return '{%s}' % delimit(sorted(map(tf, obj)))
    elif otype is dict:
        return '{%s}' % delimit(sorted(tf(k)+': '+tf(v) for k,v in obj.items()))
    elif otype is list:
        return '[%s]' % delimit(map(tf, obj))
    elif otype is tuple:
        return '(%s%s)' % (delimit(map(tf, obj)), ',' if len(obj) is 1 else '')
    elif otype.__name__ in ['Vec','Mat']:
        entries = tf({x:obj.f[x] for x in obj.f if obj.f[x] != 0})
        return '%s(%s, %s)' % (otype.__name__, test_format(obj.D), entries)
    else:
        return str(obj)
Example #30
0
def _get_archive_filelist(filename):
    # type: (str) -> List[str]
    """Extract the list of files from a tar or zip archive.

    Args:
        filename: name of the archive

    Returns:
        Sorted list of files in the archive, excluding './'

    Raises:
        ValueError: when the file is neither a zip nor a tar archive
        FileNotFoundError: when the provided file does not exist (for Python 3)
        IOError: when the provided file does not exist (for Python 2)
    """
    if tarfile.is_tarfile(filename):
        with tarfile.open(filename) as archive:
            names = sorted(archive.getnames())  # type: List[str]
    elif zipfile.is_zipfile(filename):
        with zipfile.ZipFile(filename) as archive:
            names = sorted(archive.namelist())
    else:
        raise ValueError("Can not get filenames from '{!s}'. "
                         "Not a tar or zip file".format(filename))
    # Drop the archive-root entry if present.
    if "./" in names:
        names.remove("./")
    return names