Example 1
def who(connection):
    protocol = connection.protocol
    player_count = len(protocol.players)
    if player_count == 0:
        connection.me('has no players connected')
        return
    sorted_players = sorted(protocol.players.values(),
        key = attrgetter('team.id', 'name'))
    name_formatter = format_name_color if connection.colors else format_name
    teams = []
    formatted_names = []
    for k, g in groupby(sorted_players, attrgetter('team')):
        teams.append(k)
        formatted_names.append(map(name_formatter, g))
    separator = '\x0f, ' if connection.colors else ', '
    if not SPLIT_WHO_IN_TEAMS or player_count < SPLIT_THRESHOLD:
        noun = 'player' if player_count == 1 else 'players'
        msg = 'has %s %s connected: ' % (player_count, noun)
        msg += separator.join(chain.from_iterable(formatted_names))
        connection.me(msg)
    else:
        for team, names in izip(teams, formatted_names):
            name_count = len(names)
            noun = 'player' if name_count == 1 else 'players'
            msg = 'has %s %s in %s: ' % (name_count, noun, team.name)
            msg += separator.join(names)
            connection.me(msg)
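Example 1 leans on two attrgetter features worth calling out: dotted paths ('team.id') and multiple attributes in one call, which yield a tuple sort key, plus the rule that groupby only merges adjacent equal keys, so the sort must come first. A minimal, self-contained sketch of the same pattern (the Player/Team types here are illustrative, not from the original project):

from collections import namedtuple
from itertools import groupby
from operator import attrgetter

Team = namedtuple('Team', 'id name')
Player = namedtuple('Player', 'name team')

red, blue = Team(0, 'Red'), Team(1, 'Blue')
players = [Player('zoe', blue), Player('al', red), Player('bo', blue)]

# attrgetter('team.id', 'name') builds the tuple key (p.team.id, p.name).
players.sort(key=attrgetter('team.id', 'name'))

# groupby only merges *adjacent* equal keys, hence the sort above.
for team, group in groupby(players, key=attrgetter('team')):
    print(team.name, [p.name for p in group])
# Red ['al']
# Blue ['bo', 'zoe']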
Example 2
    def test_related_objects_for_inherited_models(self):
        # Related objects work just as they normally do.
        s1 = Supplier.objects.create(name="Joe's Chickens", address="123 Sesame St")
        s1.customers.set([self.restaurant, self.italian_restaurant])
        s2 = Supplier.objects.create(name="Luigi's Pasta", address="456 Sesame St")
        s2.customers.set([self.italian_restaurant])

        # This won't work because the Place we select is not a Restaurant (it's
        # a Supplier).
        p = Place.objects.get(name="Joe's Chickens")
        with self.assertRaises(Restaurant.DoesNotExist):
            p.restaurant

        self.assertEqual(p.supplier, s1)
        self.assertQuerysetEqual(
            self.italian_restaurant.provider.order_by("-name"),
            ["Luigi's Pasta", "Joe's Chickens"],
            attrgetter("name"),
        )
        self.assertQuerysetEqual(
            Restaurant.objects.filter(provider__name__contains="Chickens"),
            ["Ristorante Miron", "Demon Dogs"],
            attrgetter("name"),
        )
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.filter(provider__name__contains="Chickens"),
            ["Ristorante Miron"],
            attrgetter("name"),
        )

        ParkingLot.objects.create(name="Main St", address="111 Main St", main_site=s1)
        ParkingLot.objects.create(name="Well Lit", address="124 Sesame St", main_site=self.italian_restaurant)

        self.assertEqual(Restaurant.objects.get(lot__name="Well Lit").name, "Ristorante Miron")
Example 3
def greedy(s, v):
    # Sort server list
    s.sort(key=operator.attrgetter("cases"), reverse=True)
    
    # Sort machine list
    v.sort(key=operator.attrgetter("cases"), reverse=True)
    
    thresholdRange = 0.1
    threshold = thresholdRange
    sId = 0
    
    # Until there's no machine left
    while v :
        currentServer = s[sId]
        currentMachine = v.pop(0)
        
        # If we have space on this server
        if currentServer.usage <= threshold-thresholdRange :
            # Add machine
            currentServer.addVM(currentMachine)
            
            # Update threshold if necessary
            if currentServer.usage > threshold :
                threshold = currentServer.usage
            
            # Change server if above a threshold
            if currentServer.usage > threshold-thresholdRange:
                # Update id
                sId = sId+1
                if sId >= len(s): sId = 0
Example 4
def get_exclusions(request, naics_code, link_page=None, all_langs=False):
	with request.connmgr.get_connection() as conn:
		cursor = conn.execute('EXEC dbo.sp_NAICS_Exclusion_l ?,?', str(naics_code), all_langs)
		exclusions = cursor.fetchall()
		
		cursor.nextset()

		uses = cursor.fetchall()

		cursor.close()

	
	uses = dict((k, list(v)) for k,v in groupby(uses, attrgetter('Exclusion_ID')))

	output = []
	for establishment, exclusions in groupby(exclusions, attrgetter('Establishment')):
		if establishment:
			output.extend([Markup('<p>'), _('Establishments primarily engaged in:', request), Markup('</p>')])

		output.append(Markup('<ul>'))
		for exclusion in exclusions:
			use_instead = "; ".join(link_code(request, x.Code, x.Code, link_page) + ' ' + escape(x.Classification) for x in (uses.get(exclusion.Exclusion_ID) or []))
			if use_instead:
				use_instead = use_instead.join([" (", ")"])

			output.extend([Markup('<li>'), escape(exclusion.Description), use_instead, Markup('</li>')])
			

		output.append(Markup('</ul>'))

	return Markup(''.join(output))
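The `dict((k, list(v)) for k, v in groupby(...))` line in Example 4 is a common idiom for turning an ordered result set into a lookup table; the `list(v)` is essential because each groupby group is a lazy view that is invalidated as soon as the outer iterator advances. A small sketch under the assumption that the rows arrive already ordered by Exclusion_ID (a hypothetical namedtuple stands in for the DB rows):

from collections import namedtuple
from itertools import groupby
from operator import attrgetter

Use = namedtuple('Use', 'Exclusion_ID Code')
rows = [Use(1, 'A'), Use(1, 'B'), Use(2, 'C')]  # assumed pre-sorted by Exclusion_ID

# Materialize each group eagerly; without list(v) the groups would be
# exhausted by the time the dict is used.
uses = dict((k, list(v)) for k, v in groupby(rows, attrgetter('Exclusion_ID')))
print(uses[1])  # [Use(Exclusion_ID=1, Code='A'), Use(Exclusion_ID=1, Code='B')]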
Example 5
    def post(self, request, device_type):
        if device_type == 'netprofiler':
            device = 'NetProfiler'
            form = NetProfilerInputForm(request.POST)
        elif device_type == 'netshark':
            device = 'NetShark'
            form = NetSharkInputForm(request.POST)
        else:
            raise Http404

        results = None
        if form.is_valid():
            data = form.cleaned_data
            if device_type == 'netprofiler':
                profiler = DeviceManager.get_device(data['device'])

                results = profiler.search_columns(realms=[data['realm']],
                                                  centricities=[data['centricity']],
                                                  groupbys=[data['groupby']])
                results.sort(key=operator.attrgetter('key'))
                results.sort(key=operator.attrgetter('iskey'), reverse=True)
                results = [(c.iskey, c.key, c.label, c.id) for c in results]
            elif device_type == 'netshark':
                shark = DeviceManager.get_device(data['device'])

                results = [(f.id, f.description, f.type) for f in shark.get_extractor_fields()]
                results.sort(key=operator.itemgetter(0))

        return render_to_response('help.html',
                                  {'device': device,
                                   'form': form,
                                   'results': results},
                                  context_instance=RequestContext(request))
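The back-to-back sorts in Example 5's netprofiler branch (first by 'key', then by 'iskey' reversed) rely on Python's sort being stable: the second sort keeps the first sort's order within each iskey group. A reduced sketch of the idiom (the Col type is illustrative):

from collections import namedtuple
from operator import attrgetter

Col = namedtuple('Col', 'iskey key')
cols = [Col(False, 'b'), Col(True, 'z'), Col(True, 'a'), Col(False, 'a')]

# Secondary key first, primary key second: stability preserves the
# 'key' order inside each iskey bucket.
cols.sort(key=attrgetter('key'))
cols.sort(key=attrgetter('iskey'), reverse=True)
print([(c.iskey, c.key) for c in cols])
# [(True, 'a'), (True, 'z'), (False, 'a'), (False, 'b')]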
Example 6
def partsFromnSegs5(components):
    segments = unwrap(components)
    if len(segments) == 5:
        return (1, 1, 1, 1, 1)
    elif len(segments) == 4:
        widths = map(attrgetter("width"), segments)
        widest = general.argmax(widths)
        result = [1] * 4
        result[widest] = 2
        return result
    elif len(segments) == 3:
        widths = map(attrgetter("width"), segments)
        narrowest = float(min(widths))
        reduced = map(lambda width: width / narrowest, widths)
        distances = map(lambda exp: distance(exp, reduced), five3)
        #print widths, narrowest, reduced, zip(distances, five3)
        return five3[general.argmin(distances)]
    elif len(segments) == 2:
        w0, w1 = segments[0].width, segments[1].width
        frac = float(w0) / float(w1)
        if frac < 1.0: frac = 1.0 / frac
        if frac > 2.75: return (4, 1) if w0 > w1 else (1, 4)
        else: return (3, 2) if w0 > w1 else (2, 3)
    elif len(segments) == 1:
        return (5, )
    else: return (1, ) * len(segments) # raise ValueError("Incorrect number of components: %d" % (len(segments)))
Example 7
    def __init__(self, inputs, group_tag, group_operator):
        groups = [None]
        self.group_idx = [[]]
        group_inputs = [[]]
        # compute group membership for each input stream
        for i, s in enumerate(inputs):
            if s.get(group_tag, None) not in groups:
                groups.append(s[group_tag])
                self.group_idx.append(list())
                group_inputs.append(list())

            g_idx = groups.index(s.get(group_tag, None))
            group_inputs[g_idx].append(s)
            self.group_idx[g_idx].append(i)


        self.operators = [group_operator(x) if len(x) else PrintOperator([])
                          for x in group_inputs]

        for o in self.operators:
            for i in xrange(0, len(o.outputs)):
                o.outputs[i]['Metadata/Extra/Operator'] = 'tgroup(%s, %s)' % (group_tag,
                                                                              str(o))
                                                                       
        self.block_streaming = reduce(operator.__or__,
                                      map(operator.attrgetter('block_streaming'), 
                                          self.operators))
        Operator.__init__(self, inputs, 
                          outputs=util.flatten(map(operator.attrgetter('outputs'), 
                                                   self.operators)))
Example 8
def AnalyzeTrajectory(trajectory):
    "Debug function to look at some basic data for each recorded trajectory"
    print "Analysing"
    letters = trajectory.GetLetterSequence()
    stime = time.time()
    # substringWorked, _ = IsSubstring(Contract(trajectory.word), Contract(''.join(letters)))
    global myDict
    wordRankings = []
    matches = myDict.GetMatches(Contract(letters))
    print time.time() - stime
    stime = time.time()
    print matches
    for word in matches:
        letterData = CollectTrajectoryData(trajectory, Contract(word))
        goodData = filter(lambda x: x.usedInWord, letterData)
        badData = filter(lambda x: not x.usedInWord, letterData)
        wordRank = [word, []]
        goodDataTime = np.mean(map(attrgetter('totalTimeSpent'), goodData))
        badDataTime = np.mean(map(attrgetter('totalTimeSpent'), badData))
        goodDataAngle = np.mean(map(attrgetter('directionInDotOut'), goodData))
        badDataAngle = np.mean(map(attrgetter('directionInDotOut'), badData))
        score = goodDataTime * 1 + badDataTime * -1 + goodDataAngle * -2 + badDataAngle * 1 + len(Contract(word)) * (1 / 3.0)
        wordRank[1] = [goodDataTime, badDataTime, goodDataAngle, badDataAngle, score]
        wordRankings.append(wordRank)
    print time.time() - stime

    top4words = []
    for wordRank in sorted(wordRankings, key=lambda x:-(x[1][-1]))[:10]:
        if wordRank[0] not in top4words:
            top4words.append(wordRank[0])
        if len(top4words) == 4:
            break

    print top4words
    return top4words
Example 9
 def initial_context(self, request, slug, entities):
     context = super(CreateView, self).initial_context(request)
     
     try:
         tour_type = self.conf.types[slug]
     except KeyError:
         raise Http404()
     else:
         tour_type['slug'] = slug
     
     context.update({
         'tour_type': tour_type,
         'entities': [],
         'attractions': dict(
             (et, sorted(et.entities_completion.filter(location__isnull=False),
                         key=attrgetter('title')))
                 for et in EntityType.objects.filter(
                     slug__in=tour_type['attraction_types'])),
         'all_pois': sorted(Entity.objects.filter(
             all_types_completion__slug__in=tour_type['attraction_types']),
             key=attrgetter('title'))
     })
     
     for entity in entities.split('/'):
         try:
             scheme, value = entity.split(':')
         except ValueError:
             continue
         context['entities'].append(get_entity(scheme, value))
     
     return context
Example 10
    def update_node_states(self):
        """ Updating node states

        PegasusGUI only
        """
        if not self.juju_state:
            return
        deployed_services = sorted(self.juju_state.services,
                                   key=attrgetter('service_name'))
        deployed_service_names = [s.service_name for s in deployed_services]

        charm_classes = sorted(
            [m.__charm_class__ for m in
             utils.load_charms(self.config.getopt('charm_plugin_dir'))
             if m.__charm_class__.charm_name in
             deployed_service_names],
            key=attrgetter('charm_name'))

        self.nodes = list(zip(charm_classes, deployed_services))

        for n in deployed_services:
            for u in n.units:
                if u.is_horizon and u.agent_state == "started":
                    self.ui.set_dashboard_url(
                        u.public_address, 'ubuntu',
                        self.config.getopt('openstack_password'))
                if u.is_jujugui and u.agent_state == "started":
                    self.ui.set_jujugui_url(u.public_address)
        if len(self.nodes) == 0:
            return
        else:
            self.ui.render_services_view(self.nodes, self.juju_state,
                                         self.maas_state, self.config)
Example 11
def create_schedule_plan(workflow, resource_config_list):
    """
    Create a schedule plan using the Myopic scheduling algorithm
    :param workflow: A workflow Object
    :param resource_config_list: a List of Resource Config objects
    :return: a SchedulePlan object
    """
    resource_schedule_list, resource_names = prepare_resrc_config(resource_config_list)
    # all task to be processed
    all_tasks = list(workflow.tasks)
    schedule_mapping_list = []
    resources = sorted(list(resource_schedule_list), key=attrgetter('ready_time'))
    while all_tasks:  # While all_tasks is not empty
        sched_tasks = [m.task for m in schedule_mapping_list]
        ready_tasks = [t for t in all_tasks
                       if t not in sched_tasks and
                       utils.contains_list(t.parents, sched_tasks)]
        for t in ready_tasks:
            r = resources[0]
            d = t.complexity_factor / r.speed_factor
            st = max(r.ready_time, parents_ready_time(t.parents, schedule_mapping_list))
            schedule_mapping_list.append(ScheduleMapping(r, t, st, d))
            r.ready_time = st + d
            all_tasks.remove(t)
            resources = sorted(resource_schedule_list, key=attrgetter('ready_time'))

    return SchedulePlan('myopic', schedule_mapping_list, workflow, resource_config_list, resource_names)
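Example 11 re-sorts the whole resource list by ready_time after every assignment just to pick the earliest-free resource; a binary heap gives the same behavior with less work per task. A sketch under assumed attributes (this Resource class is illustrative, not the project's Resource Config object):

import heapq

class Resource:
    def __init__(self, name, speed_factor):
        self.name, self.speed_factor, self.ready_time = name, speed_factor, 0.0
    def __lt__(self, other):  # lets heapq order resources by ready_time
        return self.ready_time < other.ready_time

resources = [Resource('r1', 1.0), Resource('r2', 2.0)]
heapq.heapify(resources)

def assign(complexity, parents_ready=0.0):
    r = heapq.heappop(resources)          # resource that frees up first
    start = max(r.ready_time, parents_ready)
    r.ready_time = start + complexity / r.speed_factor
    heapq.heappush(resources, r)
    return r.name, start

print(assign(4.0))  # ('r1', 0.0)
print(assign(4.0))  # ('r2', 0.0)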
Example 12
    def get_context_data(self, **kwargs):
        context = super(ContestList, self).get_context_data(**kwargs)
        present, active, future = [], [], []
        for contest in self._get_queryset().exclude(end_time__lt=self._now):
            if contest.start_time > self._now:
                future.append(contest)
            else:
                present.append(contest)

        if self.request.user.is_authenticated:
            for participation in ContestParticipation.objects.filter(virtual=0, user=self.request.profile,
                                                                     contest_id__in=present) \
                    .select_related('contest').prefetch_related('contest__organizers'):
                if not participation.ended:
                    active.append(participation)
                    present.remove(participation.contest)

        active.sort(key=attrgetter('end_time'))
        future.sort(key=attrgetter('start_time'))
        context['active_participations'] = active
        context['current_contests'] = present
        context['future_contests'] = future
        context['now'] = self._now
        context['first_page_href'] = '.'
        return context
Example 13
def process_translations(content_list):
    """ Finds all translation and returns tuple with two lists (index,
    translations).  Index list includes items in default language or items
    which have no variant in default language.

    Also, for each content_list item, it sets attribute 'translations'
    """
    content_list.sort(key=attrgetter("slug"))
    grouped_by_slugs = groupby(content_list, attrgetter("slug"))
    index = []
    translations = []

    for slug, items in grouped_by_slugs:
        items = list(items)
        # find items with default language
        default_lang_items = list(filter(attrgetter("in_default_lang"), items))
        len_ = len(default_lang_items)
        if len_ > 1:
            logger.warning('there are %s variants of "%s"' % (len_, slug))
            for x in default_lang_items:
                logger.warning("    {}".format(x.source_path))
        elif len_ == 0:
            default_lang_items = items[:1]

        if not slug:
            logger.warning(
                ("empty slug for {!r}. " "You can fix this by adding a title or a slug to your " "content").format(
                    default_lang_items[0].source_path
                )
            )
        index.extend(default_lang_items)
        translations.extend([x for x in items if x not in default_lang_items])
        for a in items:
            a.translations = [x for x in items if x != a]
    return index, translations
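The sort-then-groupby shape of process_translations (Example 13) is the same as in Examples 1 and 4, with the fallback `items[:1]` covering slugs that have no default-language variant. A compressed sketch (the Article type is illustrative):

from collections import namedtuple
from itertools import groupby
from operator import attrgetter

Article = namedtuple('Article', 'slug lang in_default_lang')
content = [Article('home', 'fr', False), Article('about', 'en', True),
           Article('home', 'en', True)]

content.sort(key=attrgetter('slug'))  # groupby needs contiguous slugs
for slug, items in groupby(content, attrgetter('slug')):
    items = list(items)
    # prefer default-language variants; otherwise fall back to the first item
    default = [a for a in items if a.in_default_lang] or items[:1]
    print(slug, [a.lang for a in default])
# about ['en']
# home ['en']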
Example 14
    def test_contextual_filter_curated(self):
        """
        Specific filters should apply even to promoted objects
        Promotion should still apply to promoted objects still included in qs
        """
        curated_context2 = Curation.objects.create(
            weight=2,
            context_object=self.context1,
            content_object=self.c3
        )

        self.assertQuerysetEqual(
            TestContent.objects.exclude(id=self.c2.id).curated(context=self.context1),
            [
                self.c3.name,
                self.c1.name,
            ],
            attrgetter("name")
        )

        self.assertQuerysetEqual(
            TestContent.objects.filter(id__lt=3).curated(context=self.context1),
            [
                self.c2.name,
                self.c1.name,
            ],
            attrgetter("name")
        )
Example 15
def match_passage_text(passage, matchers, out):
    passage_tokens = sorted(passage.layer(layer0.LAYER_ID).all, key=attrgetter("position"))
    for paragraph, terminals in groupby(passage_tokens, key=attrgetter("paragraph")):
        tokens = [terminal.text for terminal in terminals]
        no_space_text = "".join(tokens)
        match = next(filter(None, (matcher(no_space_text) for matcher in matchers)), "@@@" + " ".join(tokens))
        print(passage.ID, match, sep="\t", file=out)
Example 16
    def test_filter_curated(self):
        """
        Specific filters should apply even to promoted objects
        Promotion should still apply to promoted objects still included in qs
        """
        c1 = Curation(content_object=self.c2, weight=1)
        c1.save()
        c2 = Curation(content_object=self.c3, weight=2)
        c2.save()

        self.assertQuerysetEqual(
            TestContent.objects.exclude(id=self.c2.id).curated(),
            [
                self.c3.name,
                self.c1.name,
            ],
            attrgetter("name")
        )

        self.assertQuerysetEqual(
            TestContent.objects.filter(id__lt=3).curated(),
            [
                self.c2.name,
                self.c1.name,
            ],
            attrgetter("name")
        )
Example 17
    def test_contextual_ordering_with_other_curation(self):
        c = Curation.objects.create(
            weight=2,
            content_object=self.c3
        )

        self.assertQuerysetEqual(
            TestContent.objects.curated(context=self.context1),
            [
                self.c2.name,
                self.c1.name,
                self.c3.name,
            ],
            attrgetter("name")
        )

        self.assertQuerysetEqual(
            TestContent.objects.curated(),
            [
                self.c3.name,
                self.c1.name,
                self.c2.name,
            ],
            attrgetter("name")
        )
Example 18
    def get_completions(self, document, complete_event, smart_completion=None):
        word_before_cursor = document.get_word_before_cursor(WORD=True)

        if smart_completion is None:
            smart_completion = self.smart_completion

        # If smart_completion is off then match any word that starts with
        # 'word_before_cursor'.
        if not smart_completion:
            matches = self.find_matches(word_before_cursor, self.all_completions,
                                        mode='strict')
            completions = [m.completion for m in matches]
            return sorted(completions, key=operator.attrgetter('text'))

        matches = []
        suggestions = suggest_type(document.text, document.text_before_cursor)

        for suggestion in suggestions:
            suggestion_type = type(suggestion)
            _logger.debug('Suggestion type: %r', suggestion_type)

            # Map suggestion type to method
            # e.g. 'table' -> self.get_table_matches
            matcher = self.suggestion_matchers[suggestion_type]
            matches.extend(matcher(self, suggestion, word_before_cursor))

        # Sort matches so highest priorities are first
        matches = sorted(matches, key=operator.attrgetter('priority'),
                         reverse=True)

        return [m.completion for m in matches]
Example 19
    def output(self, args, begin_ns, end_ns, final=0):
        count = 0
        limit = args.top
        graph = Pyasciigraph()
        values = []
        print('%s to %s' % (ns_to_asctime(begin_ns), ns_to_asctime(end_ns)))
        for tid in sorted(self.state.tids.values(),
                          key=operator.attrgetter('allocated_pages'),
                          reverse=True):
            values.append(("%s (%d)" % (tid.comm, tid.tid),
                          tid.allocated_pages))
            count = count + 1
            if limit > 0 and count >= limit:
                break
        for line in graph.graph("Per-TID Memory Allocations", values,
                                unit=" pages"):
            print(line)

        values = []
        count = 0
        for tid in sorted(self.state.tids.values(),
                          key=operator.attrgetter('freed_pages'),
                          reverse=True):
            values.append(("%s (%d)" % (tid.comm, tid.tid), tid.freed_pages))
            count = count + 1
            if limit > 0 and count >= limit:
                break
        for line in graph.graph("Per-TID Memory Deallocation", values,
                                unit=" pages"):
            print(line)
        print("\nTotal memory usage:\n- %d pages allocated\n- %d pages freed" %
              (self.state.mm["allocated_pages"], self.state.mm["freed_pages"]))
Example 20
    def _find_resources(cls, attr_patterns):
        # TODO: Memoize
        _LOG.debug('Search Started')

        result = []

        for attribute, pattern in attr_patterns:
            op = operator.attrgetter(attribute)

            if pattern:
                _pattern = unicode(pattern).strip()
            else:
                _LOG.debug('Skipping Attribute "{0}": No Pattern "{1}"'.format(attribute, pattern))
                continue

            if _pattern:
                pattern = re.compile(_pattern)
            else:
                _LOG.debug('Skipping Attribute "{0}": Empty Pattern'.format(attribute))
                continue

            result.extend([r for r in cls._resource_collection.itervalues() if pattern.match(op(r))])

        result.sort(key=operator.attrgetter('name'))

        _LOG.debug('Search Complete: {0:3d} matches'.format(len(result)))
        return result
Example 21
    def _get_views(self):
        IrModelData = self.env['ir.model.data'].with_context(active_test=True)
        dmodels = ['ir.ui.view', 'ir.actions.report.xml', 'ir.ui.menu']

        for module in self:
            # Skip uninstalled modules below, no data to find anyway.
            if module.state not in ('installed', 'to upgrade', 'to remove'):
                module.views_by_module = ""
                module.reports_by_module = ""
                module.menus_by_module = ""
                continue

            # then, search and group ir.model.data records
            imd_models = defaultdict(list)
            imd_domain = [('module', '=', module.name), ('model', 'in', tuple(dmodels))]
            for data in IrModelData.search(imd_domain):
                imd_models[data.model].append(data.res_id)

            def browse(model):
                # as this method is called before the module update, some xmlids
                # may be invalid at this stage; explicitly filter records before
                # reading them
                return self.env[model].browse(imd_models[model]).exists()

            def format_view(v):
                return '%s%s (%s)' % (v.inherit_id and '* INHERIT ' or '', v.name, v.type)

            module.views_by_module = "\n".join(sorted(map(format_view, browse('ir.ui.view'))))
            module.reports_by_module = "\n".join(sorted(map(attrgetter('name'), browse('ir.actions.report.xml'))))
            module.menus_by_module = "\n".join(sorted(map(attrgetter('complete_name'), browse('ir.ui.menu'))))
Example 22
 def do_show(self, arg):
     '''Shows various things'''
     args = arg.split()
     if args[0] in (c.name for c in self.sd.creatures):
         print(repr(next((c for c in self.sd.creatures
                          if c.name == args[0]), None)))
     elif args[0] == 'verbosity':
         print('verbosity = {}'.format(self.sd.settings.verbosity))
     elif args[0] == 'random':
         print(repr(rand.choice(self.sd.creatures)))
     elif args[0] == 'max':
         try:
             print(repr(max(self.sd.creatures,
                            key = op.attrgetter(args[1]))))
         except:
             print("Couldn't get the maximum of that")
     elif args[0] == 'min':
         try:
             print(repr(min(self.sd.creatures,
                            key = op.attrgetter(args[1]))))
         except:
             print("Couldn't get the minimum of that.")
     elif arg == 'most skillful':
         def _skill(c):
             'Determine skill number'
             if c.survived > 0:
                 return (float(c.kills ** 2) / c.survived)
             else:
                 return 0
         print(repr(max(self.sd.creatures, key = _skill)))
     else:
         print("Not sure what you want me to show you :(")
Example 23
    def run(self, **kwargs):
        if self.cfg.migrate.migrate_users:
            LOG.info("Users will be migrated. Skipping this check.")
            return
        src_identity = self.src_cloud.resources[utils.IDENTITY_RESOURCE]
        dst_identity = self.dst_cloud.resources[utils.IDENTITY_RESOURCE]

        src_keystone_client = src_identity.get_client()
        dst_keystone_client = dst_identity.get_client()

        LOG.info("Going to get all users from source cloud, this may take a "
                 "while for large LDAP-backed clouds, please be patient")

        src_users = src_keystone_client.users.list()
        dst_users = dst_keystone_client.users.list()

        src_user_names = set(map(attrgetter('name'), src_users))
        dst_user_names = set(map(attrgetter('name'), dst_users))

        users_missing_on_dst = src_user_names - dst_user_names

        if users_missing_on_dst:
            msg = "{n} missing users on destination: {users}".format(
                n=len(users_missing_on_dst),
                users=", ".join(users_missing_on_dst))
            LOG.error(msg)
            raise cf_exceptions.AbortMigrationError(msg)

        LOG.info("All users are available on source, migration can proceed")
Example 24
def clean_permit_db():
	account_nums = []
	dupe_nums = []
	permits = Permit.query.all()
	for permit in permits:
		account_nums.append(permit.account_num)
	for item in account_nums:
		instances = account_nums.count(item)
		if instances > 1:
			dupe_nums.append(item)
	dupes_set = list(set(dupe_nums))
	for item in dupes_set:
		dupe_records = Permit.query.filter(Permit.account_num==item).all()
		dupe_with_received = []
		dupe_no_received = []
		for item in dupe_records:
			if item.received is not None:
				dupe_with_received.append(item)
			else:
				dupe_no_received.append(item)
		if len(dupe_with_received) > 0:
			dupe_with_received_sorted = sorted(dupe_with_received, key=attrgetter("submitted"))
			keep = dupe_with_received_sorted.pop()
			for item in dupe_with_received_sorted:
				db.session.delete(item)
			for item in dupe_no_received:
				db.session.delete(item)
		else:
			dupe_no_received_sorted = sorted(dupe_no_received, key=attrgetter("submitted"))
			keep = dupe_no_received_sorted.pop()
			for item in dupe_no_received_sorted:
				db.session.delete(item)
	db.session.commit()
Example 25
def selectISOLAstreams(station_list):
    """
    Selects three streams per station for each component N,E,Z mandatory
    """

    # sort stations by priority and then by distance from origin
    _temp_station_list = sorted(station_list, key=operator.attrgetter('distance_by_origin'))
    station_list = sorted(_temp_station_list, key=operator.attrgetter('priority'), reverse=True)

    # stations can't be more than 21 in order to run ISOLA
    station_list = limitStation_list(station_list)

    for station in station_list:

        # get all N, E, Z possible streams
        _list_N = [x for x in station.stream_list if x.code[2] == 'N']
        _list_E = [x for x in station.stream_list if x.code[2] == 'E']
        _list_Z = [x for x in station.stream_list if x.code[2] == 'Z']

        # sort streams by priority
        _list_N.sort(key=operator.attrgetter("priority"), reverse=True)
        _list_E.sort(key=operator.attrgetter("priority"), reverse=True)
        _list_Z.sort(key=operator.attrgetter("priority"), reverse=True)

        # stream_list contains at the most one stream per component (N,E,Z)
        station.stream_list = _list_N[0:1] + _list_E[0:1] + _list_Z[0:1]

    return station_list
Example 26
def get_data_transfer(dstore):
    """
    Determine the amount of data transferred from the controller node
    to the workers and back in a classical calculation.

    :param dstore: a :class:`openquake.commonlib.datastore.DataStore` instance
    :returns: (block_info, to_send_forward, to_send_back)
    """
    oqparam = OqParam.from_(dstore.attrs)
    sitecol = dstore['sitecol']
    rlzs_assoc = dstore['rlzs_assoc']
    info = dstore['job_info']
    sources = dstore['composite_source_model'].get_sources()
    num_gsims_by_trt = groupby(rlzs_assoc, operator.itemgetter(0),
                               lambda group: sum(1 for row in group))
    gsims_assoc = rlzs_assoc.gsims_by_trt_id
    to_send_forward = 0
    to_send_back = 0
    block_info = []
    for block in split_in_blocks(sources, oqparam.concurrent_tasks or 1,
                                 operator.attrgetter('weight'),
                                 operator.attrgetter('trt_model_id')):
        num_gsims = num_gsims_by_trt.get(block[0].trt_model_id, 0)
        back = info['n_sites'] * info['n_levels'] * info['n_imts'] * num_gsims
        to_send_back += back * 8  # 8 bytes per float
        args = (block, sitecol, gsims_assoc, PerformanceMonitor(''))
        to_send_forward += sum(len(p) for p in parallel.pickle_sequence(args))
        block_info.append((len(block), block.weight))
    return numpy.array(block_info, block_dt), to_send_forward, to_send_back
Example 27
    def generate_feeds(self, writer):
        """Generate the feeds from the current context, and output files."""

        if self.settings.get("FEED"):
            writer.write_feed(self.articles, self.context, self.settings["FEED"])

        if self.settings.get("FEED_RSS"):
            writer.write_feed(self.articles, self.context, self.settings["FEED_RSS"], feed_type="rss")

        for cat, arts in self.categories:
            arts.sort(key=attrgetter("date"), reverse=True)
            if self.settings.get("CATEGORY_FEED"):
                writer.write_feed(arts, self.context, self.settings["CATEGORY_FEED"] % cat)

            if self.settings.get("CATEGORY_FEED_RSS"):
                writer.write_feed(arts, self.context, self.settings["CATEGORY_FEED_RSS"] % cat, feed_type="rss")

        if self.settings.get("TAG_FEED") or self.settings.get("TAG_FEED_RSS"):
            for tag, arts in self.tags.items():
                arts.sort(key=attrgetter("date"), reverse=True)
                if self.settings.get("TAG_FEED"):
                    writer.write_feed(arts, self.context, self.settings["TAG_FEED"] % tag)

                if self.settings.get("TAG_FEED_RSS"):
                    writer.write_feed(arts, self.context, self.settings["TAG_FEED_RSS"] % tag, feed_type="rss")

        if self.settings.get("TRANSLATION_FEED"):
            translations_feeds = defaultdict(list)
            for article in chain(self.articles, self.translations):
                translations_feeds[article.lang].append(article)

            for lang, items in translations_feeds.items():
                items.sort(key=attrgetter("date"), reverse=True)
                writer.write_feed(items, self.context, self.settings["TRANSLATION_FEED"] % lang)
Example 28
 def create_children(self, f, parent):
     for child in sorted(f.folders, key=attrgetter('name')):
         i = self.item_func(child, parent)
         self.create_children(child, i)
     if self.show_files:
         for child in sorted(f.files, key=attrgetter('name')):
             i = self.item_func(child, parent)
Example 29
    def update_node_states(self):
        """ Updating node states

        PegasusGUI only
        """
        if not self.juju_state:
            return
        deployed_services = sorted(self.juju_state.services,
                                   key=attrgetter('service_name'))
        deployed_service_names = [s.service_name for s in deployed_services]

        charm_classes = sorted(
            [m.__charm_class__ for m in
             utils.load_charms(self.config.getopt('charm_plugin_dir'))
             if m.__charm_class__.charm_name in
             deployed_service_names],
            key=attrgetter('charm_name'))

        self.nodes = list(zip(charm_classes, deployed_services))

        if len(self.nodes) == 0:
            return
        else:
            if not self.ui.services_view:
                self.ui.render_services_view(
                    self.nodes, self.juju_state,
                    self.maas_state, self.config)
            else:
                self.ui.refresh_services_view(self.nodes, self.config)
Example 30
    def _port_stats_reply_handler(self, ev):
        msg = ev.msg
        body = msg.body
        dpid = msg.datapath.id
        switchCollect = self.lastCollect.setdefault(dpid, SwitchStats(dpid))
        ports = switchCollect.getPotrs()

        self.logger.info(
            "datapath            port     " "rx-pkts   rx-bytes   rx-error " "tx-pkts   tx-bytes   tx-error"
        )
        self.logger.info(
            "-----------    -------------- " "-------   --------   -------- " "-------   --------   --------"
        )
        for stat in sorted(body, key=attrgetter("port_no")):
            self.logger.info(
                "%016x %8x %8d %8d %8d %8d %8d %8d ",
                ev.msg.datapath.id,
                stat.port_no,
                stat.rx_packets,
                stat.rx_bytes,
                stat.rx_errors,
                stat.tx_packets,
                stat.tx_bytes,
                stat.tx_errors,
            )

        for stat in sorted(body, key=attrgetter("port_no")):
            portNo = stat.port_no
            if portNo < 10000:
                port = ports.setdefault(portNo, Portstats(portNo))
                port.setFieds(stat, time.time())

        if self.superExist:
            pass
Example 31
import time
import operator
import numpy

from openquake.baselib import general, hdf5
from openquake.baselib.python3compat import decode
from openquake.hazardlib import probability_map, stats
from openquake.hazardlib.source.rupture import (BaseRupture, RuptureProxy,
                                                to_arrays)
from openquake.commonlib import datastore

U16 = numpy.uint16
U32 = numpy.uint32
F32 = numpy.float32
by_taxonomy = operator.attrgetter('taxonomy')
code2cls = BaseRupture.init()
weight = operator.itemgetter('n_occ')


class NotFound(Exception):
    pass


def build_stat_curve(pcurve, imtls, stat, weights):
    """
    Build statistics by taking into account IMT-dependent weights
    """
    poes = pcurve.array.T  # shape R, L
    assert len(poes) == len(weights), (len(poes), len(weights))
    L = imtls.size
Example 32
    def populate_cpu_children(self):
        """Populates child events into each underlying FunctionEvent object.
        One event is a child of another if [s1, e1) is inside [s2, e2). Where
        s1 and e1 would be start and end of the child event's interval. And
        s2 and e2 start and end of the parent event's interval

        Example: in the event list [[0, 10], [1, 3], [3, 4]], [0, 10] would
        be the parent of the two other intervals.

        If for any reason two intervals intersect only partially, this function
        will not record a parent-child relationship between them.
        """
        if self.cpu_children_populated:
            return

        # Some events can be async (i.e. start and end on different threads),
        # since it's generally undefined how to attribute children ranges to
        # async ranges, we do not use them when calculating nested ranges and stats
        sync_events = [evt for evt in self if not evt.is_async]
        events = sorted(
            sync_events,
            key=attrgetter("thread"),
        )
        # Group by both thread and node_id, so that events that happen to have
        # the same thread_id but are from different nodes aren't incorrectly
        # grouped together.
        threads = itertools.groupby(events,
                                    key=lambda event:
                                    (event.thread, event.node_id))

        # For each thread we keep a stack of current nested parents.
        # We maintain the invariant that each interval is a subset of all other
        # intervals lower in the stack.
        #
        # First we sort the intervals by their start time. Then we iterate over them.
        # Every time we see a new interval we remove several parents from
        # the top until we restore the invariant. Then a parent-child
        # relationship is recorded if the stack is not empty.
        # Finally we add the new interval to the stack.
        #
        # Algorithm has O(N * log(N)) complexity where N is number of
        # intervals
        for thread_id, thread_events in threads:
            thread_events = sorted(
                thread_events,
                key=lambda event:
                [event.cpu_interval.start, -event.cpu_interval.end],
            )
            current_events = []
            cur_end = 0
            for event in thread_events:
                while len(current_events) > 0:
                    parent = current_events[-1]
                    if event.cpu_interval.start >= parent.cpu_interval.end or \
                            event.cpu_interval.end > parent.cpu_interval.end:
                        # this can't be a parent
                        current_events.pop()
                    else:
                        parent.append_cpu_child(event)
                        assert (
                            event.cpu_parent is None
                        ), "There is already a CPU parent event for {}".format(
                            event.key)
                        event.set_cpu_parent(parent)
                        break

                current_events.append(event)

        self._cpu_children_populated = True
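The `[event.cpu_interval.start, -event.cpu_interval.end]` sort key is what makes the stack walk in Example 32 work: among intervals with equal starts, the longer (enclosing) one comes first, so a parent is always visited before its children. A toy check of that invariant on plain (start, end) pairs:

# Sorting by (start, -end) visits an enclosing interval before anything
# nested inside it, which the stack invariant above relies on.
intervals = [(1, 3), (0, 10), (3, 4), (0, 4)]
intervals.sort(key=lambda iv: (iv[0], -iv[1]))
print(intervals)  # [(0, 10), (0, 4), (1, 3), (3, 4)]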
Example 33
    def __init__(self,
                 intervals,
                 depth=16,
                 minbucket=16,
                 _extent=None,
                 maxbucket=512):
        """\
        `intervals` a list of intervals *with start and stop* attributes.
        `depth`     the depth of the tree
        `minbucket` if any node in the tree has fewer than minbucket
                    elements, make it a leaf node
        `maxbucket` even if at the specified `depth`, if the number of intervals >
                    maxbucket, split the node and make the tree deeper.

        depth and minbucket usually do not need to be changed. if
        dealing with large numbers (> 1M) of intervals, the depth could
        be increased to 24.

        Usage:

         >>> ivals = [Interval(2, 3), Interval(1, 8), Interval(3, 6)]
         >>> tree = IntervalTree(ivals)
         >>> sorted(tree.find(1, 2))
         [Interval(2, 3), Interval(1, 8)]

        this provides an extreme and satisfying performance improvement
        over searching manually over all 3 elements in the list (like
        a sucker). 

        the IntervalTree class now also supports the iterator protocol
        so it's easy to loop over all elements in the tree:

         >>> import operator
         >>> sorted([iv for iv in tree], key=operator.attrgetter('start'))
         [Interval(1, 8), Interval(2, 3), Interval(3, 6)]


        NOTE: any object with start and stop attributes can be used
        in the incoming intervals list.
        """

        depth -= 1
        if (depth == 0
                or len(intervals) < minbucket) and len(intervals) < maxbucket:
            self.intervals = intervals
            self.left = self.right = None
            return

        if _extent is None:
            # sorting the first time through allows it to get
            # better performance in searching later.
            intervals.sort(key=operator.attrgetter('start'))

        left, right = _extent or \
               (intervals[0].start, max(i.stop for i in intervals))
        #center = intervals[len(intervals)/ 2].stop
        center = (left + right) / 2.0

        self.intervals = []
        lefts, rights = [], []

        for interval in intervals:
            if interval.stop < center:
                lefts.append(interval)
            elif interval.start > center:
                rights.append(interval)
            else:  # overlapping.
                self.intervals.append(interval)

        self.left = lefts and IntervalTree(
            lefts, depth, minbucket, (intervals[0].start, center)) or None
        self.right = rights and IntervalTree(rights, depth, minbucket,
                                             (center, right)) or None
        self.center = center
Example 34
    def extract(self, video_path, dir=None, mode=0, debug = -1, **kwargs):
        self.set(mode, debug, **kwargs)

        start_time = time()
        logger.debug(sys.executable)
        if not dir:
            dir = os.path.splitext(video_path)[0]
            if not os.path.exists(dir):
                os.mkdir(dir)

        logger.debug("initial allocated memory: %d MB" % (psutil.Process(os.getpid()).memory_info().rss >> 20))
        logger.info("target video: " + os.path.abspath(video_path))
        logger.info("frame save directory: " + os.path.abspath(dir))

        # load video and compute diff between frames
        cap = cv2.VideoCapture(str(video_path))
        curr_frame = None
        prev_frame = None
        frame_diffs = []
        frames = []
        success, frame = cap.read()
        i = 0
        while(success):
            luv = cv2.cvtColor(frame, cv2.COLOR_BGR2LUV)
            curr_frame = luv
            if curr_frame is not None and prev_frame is not None:
                #logic here
                diff = cv2.absdiff(curr_frame, prev_frame)
                diff_sum = np.sum(diff)
                diff_sum_mean = diff_sum / (diff.shape[0] * diff.shape[1])
                frame_diffs.append(diff_sum_mean)
                frame = Frame(i, diff_sum_mean)
                frames.append(frame)
            prev_frame = curr_frame
            i = i + 1
            success, frame = cap.read()
        cap.release()

        # compute keyframe
        keyframe_id_set = set()
        if self.use_top_order:
            # sort the list in descending order
            frames.sort(key=operator.attrgetter("diff"), reverse=True)
            for keyframe in frames[:self.num_top_frames]:
                keyframe_id_set.add(keyframe.id)
        if self.use_thresh:
            for i in range(1, len(frames)):
                # np.float is deprecated; plain float() behaves identically here
                if rel_change(float(frames[i - 1].diff), float(frames[i].diff)) >= self.thresh:
                    keyframe_id_set.add(frames[i].id)
        if self.use_local_maxima:
            diff_array = np.array(frame_diffs)
            sm_diff_array = smooth(diff_array, self.len_window)
            frame_indexes = np.asarray(argrelextrema(sm_diff_array, np.greater))[0]
            for i in frame_indexes:
                keyframe_id_set.add(frames[i - 1].id)

            plt.figure(figsize=(40, 20))
            plt.locator_params(numticks=100)
            plt.stem(sm_diff_array)
            plt.savefig(os.path.join(dir, 'plot.png'))
            plt.close('all'); gc.collect(); # these 2 statements will release the memory of plt and save 60MB

        # save all keyframes as image
        cap = cv2.VideoCapture(str(video_path))
        curr_frame = None
        keyframes = []
        success, frame = cap.read()
        idx = 0

        while(success):
            if idx in keyframe_id_set:
                name = "keyframe_%04d.jpg" %idx
                cv2.imwrite(os.path.join(dir, name), frame)
                keyframe_id_set.remove(idx)
            idx = idx + 1
            success, frame = cap.read()

        cap.release()

        end_time = time()
        logger.info("elapsed time: %d" % (end_time - start_time))
        logger.debug("end allocated memory: %dMB" % (psutil.Process(os.getpid()).memory_info().rss >> 20))
Example 35
            f = open('highscores.txt')
            scores = []
            l = []
            top_scores = []
            scores_d = {}
            for line in f:
                if line != '\n':
                    row = line.split(',')
                    scores.append(Score(row[0], int(row[1][:-1])))
                    scores_d[row[0]] = row[1][:-1]
            if len(scores) < 10:
                iteration = range(len(scores))
            else:
                iteration = range(10)
            for i in iteration:
                top_scores.append(max(scores, key=operator.attrgetter('score')))
                scores.remove(max(scores, key=operator.attrgetter('score')))
            print(scores_d)
            f.close()
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    gameCode = 'menu'

            for i in range(len(top_scores)):
                label_name = scoreFont.render(top_scores[i].name, 1, (255,255,255))
                label_score = scoreFont.render(str(top_scores[i].score), 1, (255,255,255))
                label_col = scoreFont.render(':', 1, (255,255,255))
                window.blit(label_col, (window_width/2, window_height/10 * i))
                window.blit(label_name, (window_width/4, window_height/10 * i))
                window.blit(label_score, (window_width/2 + window_width/20, window_height/10 * i))
Example 36
 def __init__(self, function, *variables, **kwargs):
     """
     """
     if len(FiniteDifferenceDerivative.formulas) == 0:
         # Load the formulas generated "by hand", which (for now, anyway) require fewer
         #   displacements than the automatically generated formulas if we also need to
         #   compute the lower order derivatives as well, as is the case with the computation
         #   of quartic forcefields. (But not, for instance, the B tensor.  So the
         #   FiniteDifferenceDerivative constructor could be optimized to take a parameter
         #   which specifies whether we should choose the formula with the fewest overall
         #   displacements or the fewest "new" displacements not needed for smaller derivatives)
         load_formulas()
     #--------------------------------------------------------------------------------#
     # miscellanea
     self._target_robustness = kwargs.pop('target_robustness', 2)
     self._value_function = kwargs.pop('value_function', None)
     self._delta_function = kwargs.pop('delta_function', None)
     self._delta = kwargs.pop('delta', None)
     self._forward = kwargs.pop('forward', False)
     self._function = function
     #--------------------------------------------------------------------------------#
     # type checking
     if type_checking_enabled:
         if not all(isinstance(v, FiniteDifferenceVariable) for v in variables):
             raise TypeError
         if not isinstance(self.target_robustness, int):
             raise TypeError
     #--------------------------------------------------------------------------------#
     # Get the variables and the orders....
     vars = listify_args(*variables)
     # Determine which formula we need
     vars = sorted(vars, key=id)
      # This is nasty, but it works... zip(*list_of_lists) effectively "unzips"
      # the (order, variable) pairs built in the comprehension below.
     self._orders, self._variables = zip(
         *sorted(
             [(len(list(g)), k) for k, g in groupby(vars)],
             reverse=True)
     )
     #--------------------------------------------------------------------------------#
     # Determine which formula to use
     # This gets reused, so define a quicky function...
     def get_possibilities(formula_list):
         return [f for f in formula_list
             if f.orders == list(self.orders)
                     and f.robustness >= self.target_robustness
                     and (f.is_forward() if self._forward else f.is_central())
         ]
     #----------------------------------------#
     # First, try and get a "hand-generated" formula
     possibilities = get_possibilities(FiniteDifferenceDerivative.formulas)
     if len(possibilities) == 0:
         # We know how to generate single variable formulas to arbitrary order, so let's do it
         n_derivative_vars = len(self.orders)
         derivative_order = sum(self.orders)
         if n_derivative_vars == 1:
              # This long name is unwieldy...
             gen_dict = FiniteDifferenceDerivative.generated_single_variable_formulas
             # See if we've already generated it...
             formula = gen_dict.get(
                 (
                     derivative_order,
                     self.target_robustness
                         + (1 if not self._forward and self.target_robustness % 2 == 1 else 0),
                     self._forward
                 ),
                 None)
             if formula is None:
                 # okay, we can generate it.
                 generate_single_variable_formulas(
                     derivative_order,
                     self.target_robustness
                         + (1 if not self._forward and self.target_robustness % 2 == 1 else 0),
                     self._forward)
                 formula = gen_dict[(
                     derivative_order,
                     self.target_robustness
                         + (1 if not self._forward and self.target_robustness % 2 == 1 else 0),
                     self._forward)]
             possibilities.append(formula)
             if sanity_checking_enabled:
                 possibilities = get_possibilities(possibilities)
         else:
             # we don't know how to generate these...yet...but I'm working on it!
             raise RuntimeError("Can't find formula for orders {0} and"
                                " robustness {1}".format(
                 self.orders, self.target_robustness))
     # Use the minimum robustness for now.  Later we can make it use
     #   the best possible without additional calculations.
     self._formula = sorted(possibilities, key=attrgetter('robustness'))[0]
Example 37
    def get_results(self):
        """New proper way to get results.

        return a list of all leagueplayers of the division with extra fields:
            - rank : integer
            - score : decimal
            - nb_win : integer
            - nb_loss : integer
            - nb_games : integer
            - results : a dict as such
            - is_active : true/false
            {opponent1 : [{'id':game1.pk, 'r':1/0},{'id':game2.pk, 'r':1/0},...],opponent2:}
        """
        sgfs = self.sgf_set.defer('sgf_text').select_related('winner', 'white', 'black').all()
        players = LeaguePlayer.objects.filter(division=self).prefetch_related('user__profile')
        # First create a list of players with extra fields
        results = []
        for player in players:
            player.n_win = 0
            player.n_loss = 0
            player.n_games = 0
            player.score = 0
            player.results = {}
            player.sos = 0
            player.sodos = 0
            results.append(player)
        for sgf in sgfs:
            if sgf.winner == sgf.white:
                loser = next(player for player in results if player.user == sgf.black)
                winner = next(player for player in results if player.user == sgf.white)
            else:
                loser = next(player for player in results if player.user == sgf.white)
                winner = next(player for player in results if player.user == sgf.black)
            winner.n_win += 1
            winner.n_games += 1
            winner.score += self.league_event.ppwin
            loser.n_loss += 1
            loser.n_games += 1
            loser.score += self.league_event.pploss
            if loser.pk in winner.results:
                winner.results[loser.pk].append({'id': sgf.pk, 'r': 1, 'p': sgf.result})
            else:
                winner.results[loser.pk] = [{'id': sgf.pk, 'r': 1, 'p': sgf.result}]
            if winner.pk in loser.results:
                loser.results[winner.pk].append({'id': sgf.pk, 'r': 0, 'p': sgf.result})
            else:
                loser.results[winner.pk] = [{'id': sgf.pk, 'r': 0, 'p': sgf.result}]

        # now let's set the active flag
        min_matchs = self.league_event.min_matchs
        for player in players:
            player.is_active = player.n_games >= min_matchs

        real_opponent = {}
        # calculate the sos and sodos for each player
        for player in players:
            for opponent, info in player.results.items():
                for opponent_player in players:
                    if opponent == opponent_player.pk:
                        real_opponent = opponent_player
                for list_item in info:
                    if list_item.get('r') == 1:
                        player.sodos += real_opponent.n_win
                    player.sos += real_opponent.n_win

        results = sorted(
            results,
            key=attrgetter('score', 'n_games', 'sos', 'sodos'),
            reverse=True
        )
        return results
Example 38
def sort_repos(repos, rows):
    sorted_repos = sorted(repos, key=attrgetter("stars"), reverse=True)
    return sorted_repos[:rows]
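When only the top `rows` items are needed, heapq.nlargest does the sort-and-slice of Example 38 in O(n log rows) instead of O(n log n), and it accepts the same attrgetter key. A sketch with an illustrative Repo type:

import heapq
from collections import namedtuple
from operator import attrgetter

Repo = namedtuple('Repo', 'name stars')
repos = [Repo('a', 5), Repo('b', 42), Repo('c', 17)]

# Same result as sorted(repos, key=..., reverse=True)[:2]
top = heapq.nlargest(2, repos, key=attrgetter('stars'))
print([r.name for r in top])  # ['b', 'c']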
Example 39
 def arrangeTB(self):
     """上端を基準に、上から下に並べます。
     """
     return self.arrange(attrgetter("top"), True)
Example 40
def simulate(optimistic=False):
    """Assume optimistically that in every tie (between 2nd and 3rd places) JPN comes ahead (it is second to arg in this group"""

    jpn = Team("JPN", favorite=1)
    sen = Team("SEN")
    col = Team("COL")
    pol = Team("POL")

    teams = [jpn, sen, col, pol]

    match(col, jpn, 1)

    match(pol, sen, 1)

    match(jpn, sen, 0)

    match(pol, col, 1)

    print("Current Standings")

    for team in teams:
        print(team)

    # simulate remaining games
    result_values = (-1, 0, 1)
    results = product(result_values, repeat=2)
    jpn_in = 0
    print("""
Matches:
    JPN x POL, SEN x COL
-1 means the left team wins
 0 means a tie
 1 means the right team wins
""")

    print("Optimistic? (assume in point ties JPN comes ahead)", optimistic)
    print()

    for result in results:
        jpn_h, sen_h, col_h, pol_h = [copy(team) for team in teams]
        match(jpn_h, pol_h, result[0])
        match(sen_h, col_h, result[1])

        standings_favorite = sorted([jpn_h, sen_h, col_h, pol_h],
                                    key=attrgetter('favorite'),
                                    reverse=optimistic)

        standings_pts = sorted(standings_favorite,
                               key=attrgetter('pts'),
                               reverse=True)

        print(format(str(tuple(result)), "17"), end="")

        if standings_pts[1].pts == standings_pts[2].pts:
            print("2nd Tie", end=" ")
        else:
            print("       ", end=" ")

        if standings_pts.index(jpn_h) < 2:
            print("*ADVANCE*", end=" ")
            jpn_in += 1
        else:
            print("_go home_", end=" ")

        print(', '.join([str(team) for team in standings_pts]))

    print("'JPN in' in a total of", jpn_in, "out of", 3**2, "scenarios")
Esempio n. 41
0
def list_orders():
    global ORDER
    ORDER.sort(key=operator.attrgetter('cust_id'))
    return ORDER
Esempio n. 42
0
    def get(self, request, post_uuid, post_comment_id):
        request_data = self._get_request_data(request, post_uuid,
                                              post_comment_id)

        serializer = GetPostCommentRepliesSerializer(data=request_data)
        serializer.is_valid(raise_exception=True)

        data = serializer.validated_data
        max_id = data.get('max_id')
        min_id = data.get('min_id')
        count_max = data.get('count_max', 10)
        count_min = data.get('count_min', 10)
        sort = data.get('sort', 'DESC')
        post_uuid = data.get('post_uuid')
        post_comment_id = data.get('post_comment_id')

        user = request.user

        sort_query = self.SORT_CHOICE_TO_QUERY[sort]

        if not max_id and not min_id:
            all_comment_replies = \
                user.get_comment_replies_for_comment_with_id_with_post_with_uuid(post_uuid=post_uuid,
                                                                                 post_comment_id=post_comment_id).order_by(
                    sort_query)[:count_max].all()
        else:
            post_comment_replies_max = []
            post_comment_replies_min = []
            if max_id:
                post_comment_replies_max = \
                    user.get_comment_replies_for_comment_with_id_with_post_with_uuid(post_uuid=post_uuid,
                                                                                     post_comment_id=post_comment_id,
                                                                                     max_id=max_id).order_by('-pk')[
                    :count_max]
                post_comment_replies_max = sorted(
                    post_comment_replies_max.all(),
                    key=operator.attrgetter('created'),
                    reverse=sort_query == self.SORT_CHOICE_TO_QUERY['DESC'])

            if min_id:
                post_comment_replies_min = \
                    user.get_comment_replies_for_comment_with_id_with_post_with_uuid(post_uuid=post_uuid,
                                                                                     post_comment_id=post_comment_id,
                                                                                     min_id=min_id).order_by('pk')[
                    :count_min]
                post_comment_replies_min = sorted(
                    post_comment_replies_min.all(),
                    key=operator.attrgetter('created'),
                    reverse=sort_query == self.SORT_CHOICE_TO_QUERY['DESC'])

            if sort_query == self.SORT_CHOICE_TO_QUERY['ASC']:
                all_comment_replies = list(
                    chain(post_comment_replies_max, post_comment_replies_min))
            elif sort_query == self.SORT_CHOICE_TO_QUERY['DESC']:
                all_comment_replies = list(
                    chain(post_comment_replies_min, post_comment_replies_max))

        post_comment_replies_serializer = PostCommentReplySerializer(
            all_comment_replies, many=True, context={"request": request})

        return Response(post_comment_replies_serializer.data,
                        status=status.HTTP_200_OK)
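The max_id/min_id branches page around a cursor: each side is fetched and sorted on its own, then spliced in the requested direction with itertools.chain. The splice in isolation (a sketch with plain integers standing in for replies, not the real queryset API):

from itertools import chain

newer = [12, 11, 10]  # replies above the cursor (min_id side), sorted DESC
older = [9, 8, 7]     # replies below the cursor (max_id side), sorted DESC
# For DESC output the newer page comes first; for ASC it is the reverse.
print(list(chain(newer, older)))  # [12, 11, 10, 9, 8, 7]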
Esempio n. 43
0
import csv
import os
import random
from operator import attrgetter

import numpy as np

# NOTE: create_population, fitness and plot_curve are helper functions
# assumed to be defined elsewhere in the same module.


def DifferentialEvolution(pop_size=100, num_gen=50, x_range=None, F=1, CR=0.5, dim=2, obj_f=None, print_detail=False):
    """Differential Evolution.
    Basically, this function will try to minimize the Schwefel's function.
    This code is based on the description of wikipedia page.

    For those arguments, see below.
    """
    ## For readability, rebind the params under clearer names
    POPULATION_SIZE = pop_size   # number of agents in a population.
    NUM_GEN         = num_gen    # number of generation loops.
    X_RANGE         = x_range    # domain limit.
    F               = F          # a coefficient called the differential weight.
    CR              = CR         # crossover probability.
    N               = dim        # dimension of the search space.
    OBJ_F           = obj_f      # objective function.


    os.makedirs('results', exist_ok=True)
    result_arr = [] # for storing the results of each generation.
    random.seed(64)

    # --- step 1 : Initialize all Agent x randomly in the search-space.
    population = create_population(POPULATION_SIZE, N, X_RANGE, OBJ_F)
    best = min(population, key=attrgetter('fitness'))

    # --- Repeat the following until the termination criterion is met.
    if print_detail:
        print('Generation loop starts. ')
        print("Generation: 0. Initial best fitness: {}".format(best.getFitness()))

    g_ = 0
    # --- Loop until an adequately small fitness is reached.
    #     (Note: NUM_GEN is written to the summary below but not used as a stop condition.)
    while best.getFitness() > 1:
        g_ += 1
        # --- For each Agent x in the population:
        for x in range(POPULATION_SIZE):

            # --- step 2 : Pick three different agents' indices from the population at random.
            #              They must also be distinct from agent x.
            a, b, c = np.random.choice(np.delete(range(0, POPULATION_SIZE),x), 3, replace=False)

            R = random.randint(0, N-1) # Pick a random index R from 0 to N-1.

            # --- step 3 : Create new candidate Agent using those Agent a, b and c.
            candidate = []
            # Each parameter of the new agent is either a mutated value or copied from agent x.
            for i in range(N):
                if (random.random() < CR) or (i == R):
                    # calculate the differential vector.
                    y = population[a].param[i] + F * (population[b].param[i] - population[c].param[i])
                    # if the value falls outside the domain, clamp it to the range limits.
                    y = X_RANGE[0] if y < X_RANGE[0] else X_RANGE[1] if y > X_RANGE[1] else y
                    candidate.append(y)
                else:
                    candidate.append(population[x].param[i])
            
            # --- step 4 : Update Agent x with candidate if the fitness of candidate is better than x's
            if fitness(candidate, OBJ_F) < fitness(population[x].getParam(), OBJ_F):
                population[x].setParam(candidate)
                population[x].setFitness(fitness(candidate, OBJ_F))

        # Pick the best and worst agents from the current population for plotting.
        best  = min(population, key=attrgetter('fitness'))
        worst = max(population, key=attrgetter('fitness'))

        result_arr.append([best.getFitness(), worst.getFitness()])

        if print_detail:
            print('Generation: {}. Best fitness {}'.format(g_, best.getFitness()))
        
        if g_ % 50 == 0:
            plot_curve(np.array(result_arr), pop_size=POPULATION_SIZE, f=F, cr=CR, d=dim)


    # --- Plot results.
    plot_curve(np.array(result_arr), pop_size=POPULATION_SIZE, f=F, cr=CR, d=dim)

    # --- Print final result.
    best_agent = min(population, key=attrgetter('fitness'))
    print('Params of best agent: {}'.format(best_agent.getParam()))
    print('The best fitness:     {}\n--'.format(best_agent.getFitness()))

    # --- Summarize result to csv.
    with open('results/summary_dim{}.csv'.format(N),'w') as f:
        writer = csv.writer(f, delimiter='\t', lineterminator='\n')
        writer.writerow(['population size', POPULATION_SIZE])
        writer.writerow(['num generation', NUM_GEN])
        writer.writerow(['F', F])
        writer.writerow(['CR', CR])
        writer.writerow(['N', N])
        writer.writerow([' '])
        writer.writerow(['final param'] + best_agent.getParam())
        writer.writerow(['final score', best_agent.getFitness()])

    return best_agent
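A hypothetical driver for the function above (a sketch, not from the source): the Schwefel objective matches the docstring's mention, while the helper functions are assumed to come from the same module.

import numpy as np

def schwefel(x):
    # Schwefel's function; global minimum is 0 at x_i ~ 420.9687
    return 418.9829 * len(x) - sum(xi * np.sin(np.sqrt(abs(xi))) for xi in x)

best = DifferentialEvolution(pop_size=50, num_gen=100, x_range=(-500, 500),
                             F=0.8, CR=0.9, dim=2, obj_f=schwefel,
                             print_detail=True)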
Esempio n. 44
0
 def arrangeLR(self):
     """左端を基準に、左から右に並べます。
     """
     return self.arrange(attrgetter("left"))
Esempio n. 45
0
def resolve_excel_ref(ref_str, default=_undefined):
    """
    If `ref_str` is an *excel-ref*, return the referred contents as a DataFrame or a scalar;
    otherwise return `default`, or raise ValueError when no default is given.

    Excel-ref examples::

            @a1
            @E5.column
            @some sheet_name!R1C5.TABLE
            @1!a1:c5.table(header=False)
            @3!a1:C5.horizontal(strict=True; atleast_2d=True)
            @sheet-1!A1.table(asarray=True){columns=['a','b']}
            @any%sheet^&name!A1:A6.vertical(header=True)        ## Setting Range's `header` kw and
                                                                #      DataFrame will parse 1st row as header

    The *excel-ref* syntax is case-insensitive apart from the key-value pairs;
    it is given below in BNF notation:

    .. productionlist::
            excel_ref   : "@"
                        : [sheet "!"]
                        : cells
                        : ["." shape]
                        : ["(" range_kws ")"]
                        : ["{" df_kws "}"]
            sheet       : sheet_name | sheet_index
            sheet_name  : <any character>
            sheet_index : `integer`
            cells       : cell_ref [":" cell_ref]
            cell_ref    : A1_ref | RC_ref | tuple_ref
            A1_ref      : <ie: "A1" or "BY34">
            RC_ref      : <ie: "R1C1" or "R24C34">
            tuple_ref   : <ie: "(1,1)" or "(24,1)", the 1st is the row>
            shape       : "." ("table" | "vertical" | "horizontal")
            range_kws   : kv_pairs                    # keywords for xlwings.Range(**kws)
            df_kws      : kv_pairs                    # keywords for pandas.DataFrame(**kws)
            kv_pairs    : <python code for **keywords ie: "a=3.14, f = 'gg'">


    Note that the "RC-notation" is not converted, so Excel may not support it (unless overridden in its options).
    """

    matcher = _excel_ref_specifier_regex.match(ref_str)
    if matcher:
        ref = matcher.groupdict()
        log.info("Parsed string(%s) as Excel-ref: %s", ref_str, ref)

        sheet = ref.get('sheet') or ''
        try:
            sheet = int(sheet)
        except ValueError:
            pass
        cells = ref['ref']
        range_kws = _parse_kws(ref.get('range_kws'))
        ref_range = xw.Range(sheet, cells, **range_kws)
        range_shape = ref.get('shape')
        if range_shape:
            ref_range = operator.attrgetter(range_shape.lower())(ref_range)

        v = ref_range.value

        if ref_range.row1 != ref_range.row2 or ref_range.col1 != ref_range.col2:
            ## Parse as DataFrame when more than one cell.
            #
            pandas_kws = _parse_kws(ref.get('pandas_kws'))
            if 'header' in range_kws and 'columns' not in pandas_kws:
                ## Use the first row as the header, unless explicit columns were given.
                v = pd.DataFrame(v[1:], columns=v[0], **pandas_kws)
            else:
                v = pd.DataFrame(v, **pandas_kws)

        log.debug("Excel-ref(%s) value: %s", ref, v)

        return v
    else:
        if default is _undefined:
            raise ValueError("Invalid excel-ref(%s)!" % ref_str)
        else:
            return default
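Hedged usage sketches (they assume an active xlwings workbook; the sheet name "data" is made up):

df = resolve_excel_ref('@data!A1:C5.table(header=True)')   # DataFrame, first row as header
scalar = resolve_excel_ref('@a1')                          # single-cell scalar
fallback = resolve_excel_ref('not a ref', default=None)    # None instead of ValueError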
Esempio n. 46
0
from operator import attrgetter

li = [9, 1, -3, 4, -7]  # sample values; the original list was not included in the snippet
s_li = sorted(li, key=abs)
print(s_li)


class Employee():
    def __init__(self, name, age, salary):
        self.name = name
        self.age = age
        self.salary = salary

    def __repr__(self):
        return '({},{},${})'.format(self.name, self.age, self.salary)


e1 = Employee('Carl', 37, 70000)
e2 = Employee('Sarah', 29, 80000)
e3 = Employee('John', 43, 90000)

employees = [e1, e2, e3]

# def e_sort(emp):
#    return emp.name
#    return emp.age
#    return emp.salary

s_employees = sorted(employees, key=attrgetter('age'))

print(s_employees)

# End
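Building on the Employee class above, attrgetter also accepts several attribute names, and the same key works with list.sort for in-place ordering (a sketch):

# Highest salary first; salary ties (none here) would fall through to name.
print(sorted(employees, key=attrgetter('salary', 'name'), reverse=True))
employees.sort(key=attrgetter('age'))  # in-place equivalent of the sorted() call above
print(employees)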
Esempio n. 47
0
#!/usr/local/bin/python3.3
# -*- coding: utf-8 -*-
"""
排序不支持原生比较的对象
"""

# 1. sorted


class User(object):
    def __init__(self, user_id):
        self.user_id = user_id

    def __repr__(self):
        return 'User({})'.format(self.user_id)


users = [User(23), User(3), User(99)]
print(users)
print(sorted(users, key=lambda u: u.user_id))

# 2. operator.attrgetter
from operator import attrgetter, itemgetter

print(sorted(users, key=attrgetter('user_id')))
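itemgetter is imported above but unused; it plays the same role as attrgetter for mappings and sequences (a sketch):

rows = [{'user_id': 23}, {'user_id': 3}, {'user_id': 99}]
print(sorted(rows, key=itemgetter('user_id')))
# [{'user_id': 3}, {'user_id': 23}, {'user_id': 99}]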
Esempio n. 48
0
class CapsProfile(Document):
    meta = {
        "collection": "capsprofiles",
        "strict": False,
        "auto_create_index": False
    }

    name = StringField(unique=True)
    description = StringField()
    # Enable snmp protocol discovery
    enable_snmp = BooleanField(default=True)
    enable_snmp_v1 = BooleanField(default=True)
    enable_snmp_v2c = BooleanField(default=True)
    # Enable L2 protocols caps discovery
    enable_l2 = BooleanField(default=False)
    #
    bfd_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    cdp_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    fdp_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    huawei_ndp_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    lacp_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    lldp_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    oam_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    rep_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    stp_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    udld_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    # Enable L3 protocols caps discovery
    enable_l3 = BooleanField(default=False)
    hsrp_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    vrrp_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    vrrpv3_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    bgp_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    ospf_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    ospfv3_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    isis_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    ldp_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )
    rsvp_policy = StringField(
        choices=[
            ("D", "Disable"),  # Always disable
            ("T", "Enable for Topology"
             ),  # Enable if appropriate topology discovery is enabled
            ("E", "Enable"),  # Always enable
        ],
        default="T",
    )

    L2_SECTIONS = [
        "bfd", "cdp", "fdp", "huawei_ndp", "lacp", "lldp", "oam", "rep", "stp",
        "udld"
    ]
    L3_SECTIONS = [
        "hsrp", "vrrp", "vrrpv3", "bgp", "ospf", "ospfv3", "isis", "ldp",
        "rsvp"
    ]

    _id_cache = cachetools.TTLCache(maxsize=100, ttl=60)
    _default_cache = cachetools.TTLCache(maxsize=100, ttl=60)

    DEFAULT_PROFILE_NAME = "default"

    def __str__(self):
        return self.name

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_id_cache"),
                             lock=lambda _: id_lock)
    def get_by_id(cls, id):
        return CapsProfile.objects.filter(id=id).first()

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_default_cache"),
                             lock=lambda _: id_lock)
    def get_default_profile(cls):
        return CapsProfile.objects.filter(
            name=cls.DEFAULT_PROFILE_NAME).first()

    def get_sections(self, mop, nsp):
        """
        Returns a list of enabled sections
        :param mop: Managed Object Profile instance
        :param nsp: Network Segment Profile instance
        :return: List of strings
        """
        def l2_is_enabled(method):
            cp = getattr(self, "%s_policy" % method)
            if cp == "E":
                return True
            if cp == "D":
                return False
            mopp = getattr(mop, "enable_box_discovery_%s" % method)
            if not mopp:
                return False
            tm = nsp.get_topology_methods()
            return method in tm

        def l3_is_enabled(method):
            cp = getattr(self, "%s_policy" % method)
            # Treat `T` policy as `E` temporarily
            return cp != "D"

        r = []
        if self.enable_snmp:
            r += ["snmp"]
            if self.enable_snmp_v1:
                r += ["snmp_v1"]
            if self.enable_snmp_v2c:
                r += ["snmp_v2c"]
        if self.enable_l2:
            r += [m for m in self.L2_SECTIONS if l2_is_enabled(m)]
        if self.enable_l3:
            r += [m for m in self.L3_SECTIONS if l3_is_enabled(m)]
        return r
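Each *_policy field above resolves through the same three-way rule: "E" is always on, "D" is always off, and "T" defers to the Managed Object Profile and the segment's topology methods. The rule in isolation (a sketch with plain values, not the mongoengine document):

def resolve_policy(policy, enabled_in_profile, in_topology_methods):
    # "E" always enables, "D" always disables, "T" defers to the profile
    # flag and to whether the method is among the topology methods.
    if policy == "E":
        return True
    if policy == "D":
        return False
    return enabled_in_profile and in_topology_methods

print(resolve_policy("T", True, True))   # True
print(resolve_policy("T", True, False))  # False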
Esempio n. 49
0
def test_operator():
    c = ConfigDict({"a": 1, "b": {"c": 3}})
    from operator import attrgetter
    assert attrgetter('b.c')(c) == 3
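attrgetter accepts a dotted path and chains the attribute lookups, which is what lets it reach into the nested ConfigDict above. The equivalence spelled out (a sketch using types.SimpleNamespace as a stand-in):

from operator import attrgetter
from types import SimpleNamespace

ns = SimpleNamespace(b=SimpleNamespace(c=3))
# attrgetter('b.c')(ns) is the same as getattr(getattr(ns, 'b'), 'c')
assert attrgetter('b.c')(ns) == getattr(getattr(ns, 'b'), 'c') == 3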
Esempio n. 50
0
    def get_latest_lockfile(self):
        # type: () -> Dict[str, Dict[str, Union[List[str], str]]]
        lockfile = {}
        constraints = {dep.name: dep.specifier for dep in self.dependencies}
        deps, _ = self.pin_dependencies()
        for dep in deps:
            dep = dep.get_dependencies()
            for sub_dep in dep.dependencies:
                if sub_dep.name not in constraints:
                    logger.info(
                        "Adding {0} (from {1}) {2!s}".format(
                            sub_dep.name, dep.name, sub_dep.specifier
                        )
                    )
                    constraints[sub_dep.name] = sub_dep.specifier
                else:
                    existing = "{0} (from {1}): {2!s} + ".format(
                        sub_dep.name, dep.name, constraints[sub_dep.name]
                    )
                    new_specifier = sub_dep.specifier
                    merged = constraints[sub_dep.name] & new_specifier
                    logger.info(
                        "Updating: {0}{1!s} = {2!s}".format(
                            existing, new_specifier, merged
                        )
                    )
                    constraints[sub_dep.name] = merged

            lockfile.update({dep.info.name: dep.releases.get_latest_lockfile()})
        for sub_dep_name, specset in constraints.items():
            try:
                sub_dep_pkg = get_package(sub_dep_name)
            except requests.exceptions.HTTPError:
                continue
            logger.info("Getting package: {0} ({1!s})".format(sub_dep, specset))
            sorted_releases = list(
                sorted(
                    sub_dep_pkg.releases,
                    key=operator.attrgetter("parsed_version"),
                    reverse=True,
                )
            )
            try:
                version = next(iter(specset.filter((r.version for r in sorted_releases))))
            except StopIteration:
                logger.info(
                    "No version of {0} matches specifier: {1}".format(sub_dep_name, specset)
                )
                logger.info(
                    "Available versions: {0}".format(
                        " ".join([r.version for r in sorted_releases])
                    )
                )
                raise
            sub_dep_instance = get_package_version(sub_dep_name, version=str(version))
            if sub_dep_instance is None:
                continue
            lockfile.update(
                {
                    sub_dep_instance.info.name: sub_dep_instance.releases.get_latest_lockfile()
                }
            )
            # lockfile.update(dep.get_latest_lockfile())
        lockfile.update({self.info.name: self.releases.get_latest_lockfile()})
        return lockfile
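The constraint merging above relies on SpecifierSet supporting the & operator and filter; the same operations in isolation (a sketch using the packaging library directly):

from packaging.specifiers import SpecifierSet

merged = SpecifierSet(">=1.0") & SpecifierSet("<2.0")
print(list(merged.filter(["0.9", "1.4", "2.1"])))  # ['1.4']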
Esempio n. 51
0
def main():
	parser = make_argument_parser()
	args = parser.parse_args()
	out = sys.stdout

	def vprint(message):
		if args.verbose:
			print(message, file=sys.stderr)

	if len(sys.argv) == 1:
		parser.print_help(file=sys.stderr)
		sys.exit()

	if args.output is not None:
		try:
			out = open(args.output, 'w', encoding='utf-8', newline='')
		except IOError as e:
			print(f'Error writing to "{args.output}": {e.strerror}', file=sys.stderr)
			sys.exit(-1)

	try:
		encoder = make_encoder(args, out)
		assert encoder is not None
		encoder.begin()

		nrecords = 0
		for pattern in args.files:
			path = None
			for path in glob(pattern):
				vprint(f'Parsing file "{path}"')

				try:
					data = None
					with open(path, 'rb') as f:
						data = parse_lab(f)
						data.append(make_field('Metadata/Source', os.path.abspath(path)))

					if data is None:
						vprint(f'There was an error trying to parse "{path}", skipping.')
						continue

					encoder.begin_record()
					for field in sorted(data, key=attrgetter('name')):
						field.encode(make_field_encoder(encoder, field))
					encoder.end_record()
					nrecords += 1

				except RuntimeError:
					print(f'There was an error trying to open "{path}", skipping...', file=sys.stderr)

			if not path:
				print(f'No files found matching "{pattern}"', file=sys.stderr)

		encoder.end()

		if args.output is not None:
			if nrecords == 0:
				print('No output files were generated', file=sys.stderr)
				sys.exit(-1)

			vprint(f'Writing data to "{args.output}"')

	finally:
		if out is not sys.stdout:
			out.close()
Esempio n. 52
0
 def api(self):
     """ Return the current Api under test. """
     f = attrgetter(self.api_name)
     return f(self.zenpy_client)
Esempio n. 53
0
import operator
import unittest.mock as mock
import numpy
from openquake.baselib import hdf5, datastore, general, performance
from openquake.hazardlib.gsim.base import ContextMaker, FarAwayRupture
from openquake.hazardlib import calc, probability_map, stats
from openquake.hazardlib.source.rupture import (
    EBRupture, BaseRupture, events_dt, RuptureProxy)
from openquake.risklib.riskinput import rsi2str
from openquake.commonlib.calc import gmvs_to_poes

U16 = numpy.uint16
U32 = numpy.uint32
F32 = numpy.float32
by_taxonomy = operator.attrgetter('taxonomy')
code2cls = BaseRupture.init()
weight = operator.attrgetter('weight')


def build_stat_curve(poes, imtls, stat, weights):
    """
    Build statistics by taking into account IMT-dependent weights
    """
    assert len(poes) == len(weights), (len(poes), len(weights))
    L = len(imtls.array)
    array = numpy.zeros((L, 1))
    if isinstance(weights, list):  # IMT-dependent weights
        # this is slower since the arrays are shorter
        for imt in imtls:
            slc = imtls(imt)
Esempio n. 54
0
from indico.modules.events.contributions import Contribution
from indico.modules.events.papers.controllers.base import RHJudgingAreaBase
from indico.modules.events.papers.forms import BulkPaperJudgmentForm
from indico.modules.events.papers.lists import PaperJudgingAreaListGeneratorDisplay, PaperAssignmentListGenerator
from indico.modules.events.papers.models.revisions import PaperRevisionState
from indico.modules.events.papers.operations import judge_paper, update_reviewing_roles
from indico.modules.events.papers.settings import PaperReviewingRole
from indico.modules.events.papers.views import WPDisplayJudgingArea, WPManagePapers
from indico.modules.events.util import ZipGeneratorMixin
from indico.util.fs import secure_filename
from indico.util.i18n import ngettext, _
from indico.web.util import jsonify_data, jsonify_template, jsonify_form


CFP_ROLE_MAP = {
    PaperReviewingRole.judge: attrgetter('judges'),
    PaperReviewingRole.content_reviewer: attrgetter('content_reviewers'),
    PaperReviewingRole.layout_reviewer: attrgetter('layout_reviewers'),
}

CONTRIB_ROLE_MAP = {
    PaperReviewingRole.judge: attrgetter('paper_judges'),
    PaperReviewingRole.content_reviewer: attrgetter('paper_content_reviewers'),
    PaperReviewingRole.layout_reviewer: attrgetter('paper_layout_reviewers'),
}


class RHPapersListBase(RHJudgingAreaBase):
    """Base class for assignment/judging paper lists"""

    @cached_property
Esempio n. 55
0
 def get_last_payment(self):
     return max(self.payments.all(), default=None, key=attrgetter("pk"))
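max() with a default avoids raising ValueError on an empty payments list; the same pattern in isolation (a sketch, with SimpleNamespace standing in for payment rows):

from operator import attrgetter
from types import SimpleNamespace

payments = [SimpleNamespace(pk=1), SimpleNamespace(pk=7), SimpleNamespace(pk=3)]
print(max(payments, default=None, key=attrgetter('pk')).pk)  # 7
print(max([], default=None, key=attrgetter('pk')))           # None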
Esempio n. 56
0
 def evaluate(self, namespace: LazyNS) -> Any:
     return operator.attrgetter(self.attr)(evaluate(self.target, namespace))
Esempio n. 57
0
def participation_data(start_date=None,
                       end_date=None,
                       disciplines=None,
                       race_classes=None,
                       organizers=None,
                       include_labels=None,
                       exclude_labels=None):
    competitions = get_competitions(start_date, end_date, disciplines,
                                    race_classes, organizers, include_labels,
                                    exclude_labels)
    license_holders_event_errors = set()

    data = []
    license_holders_attendance_total = defaultdict(int)
    license_holders_men_total = defaultdict(int)
    license_holders_women_total = defaultdict(int)

    category_total_overall = defaultdict(int)
    category_competition_total = defaultdict(lambda: defaultdict(int))
    event_competition_participants_total = defaultdict(
        lambda: defaultdict(int))
    competition_attendee_total = defaultdict(int)
    competition_participants_total = defaultdict(int)
    category_max_overall = defaultdict(int)

    competition_category_event = defaultdict(dict)

    age_increment = 5
    age_range_license_holders = [set() for i in range(0, 120, age_increment)]
    age_range_attendee_count = [0 for i in range(0, 120, age_increment)]
    age_range_men_license_holders = [
        set() for i in range(0, 120, age_increment)
    ]
    age_range_men_attendee_count = [0 for i in range(0, 120, age_increment)]
    age_range_women_license_holders = [
        set() for i in range(0, 120, age_increment)
    ]
    age_range_women_attendee_count = [0 for i in range(0, 120, age_increment)]
    license_holders_set = set()

    profile_year = 0
    participants_total = 0
    prereg_participants_total = 0
    competitions_total, events_total = 0, 0

    discipline_overall = defaultdict(set)
    discipline_men = defaultdict(set)
    discipline_women = defaultdict(set)
    discipline_bucket = defaultdict(lambda: defaultdict(set))

    def fix_age(age):
        return max(min(age, 119), 0)

    for competition in competitions:
        if not competition.has_participants():
            continue

        discipline_name = competition.discipline.name

        competitions_total += 1
        profile_year = max(profile_year, competition.start_date.year)

        competition_data = {
            'name': competition.name,
            'start_date': competition.start_date.strftime('%Y-%m-%d'),
            'events': [],
            'attendees_men': 0,
            'attendees_women': 0,
            'attendees_total': 0,
            'participants_men': 0,
            'participants_women': 0,
            'participants_total': 0,
            'participants_men_dnf': 0,
            'participants_women_dnf': 0,
            'participants_total_dnf': 0,
            'participants_men_dns': 0,
            'participants_women_dns': 0,
            'participants_total_dns': 0,
            'prereg_participants_men': 0,
            'prereg_participants_women': 0,
            'participants_paid_seasons_pass': 0,
            'participants_paid_on_venue': 0,
            'participants_unpaid': 0,
        }
        for event in competition.get_events():
            if not event.has_participants():
                continue

            events_total += 1

            attendee_data = []
            participant_data = []
            prereg_participant_data = []
            event_license_holders = set()
            for participant in event.get_participants():

                # Participation.
                license_holder = participant.license_holder
                age = event.date_time.year - license_holder.date_of_birth.year
                if not (7 < age < 100):
                    license_holders_event_errors.add((license_holder, event))
                age = fix_age(age)

                event_competition_participants_total[competition][event] += 1
                category_name = participant.category.code_gender if participant.category else u'{}'.format(
                    _('Unknown'))
                category_total_overall[category_name] += 1
                category_competition_total[competition][category_name] += 1
                competition_category_event[competition][
                    category_name] = event.name
                competition_participants_total[competition] += 1
                participants_total += 1
                participant_data.append([license_holder.gender, age])
                if participant.preregistered:
                    prereg_participants_total += 1
                    prereg_participant_data.append(
                        [license_holder.gender, age])

                if license_holder in event_license_holders:
                    continue

                # Attendance: definition: a LicenseHolder that attends a Competition
                event_license_holders.add(license_holder)

                attendee_data.append([license_holder.gender, age])
                license_holders_set.add(license_holder)
                license_holders_attendance_total[license_holder] += 1

                bucket = age // age_increment

                age_range_license_holders[bucket].add(license_holder)
                age_range_attendee_count[bucket] += 1

                competition_attendee_total[competition] += 1

                discipline_overall[discipline_name].add(license_holder)
                discipline_bucket[discipline_name][bucket].add(license_holder)

                if license_holder.gender == 0:
                    license_holders_men_total[license_holder] += 1
                    age_range_men_license_holders[bucket].add(license_holder)
                    age_range_men_attendee_count[bucket] += 1
                    discipline_men[discipline_name].add(license_holder)
                else:
                    license_holders_women_total[license_holder] += 1
                    age_range_women_license_holders[bucket].add(license_holder)
                    age_range_women_attendee_count[bucket] += 1
                    discipline_women[discipline_name].add(license_holder)

            event_men_dnf = event.get_results().filter(
                status=Result.cDNF,
                participant__license_holder__gender=0).count()
            event_men_dns = event.get_results().filter(
                status=Result.cDNS,
                participant__license_holder__gender=0).count()
            event_women_dnf = event.get_results().filter(
                status=Result.cDNF,
                participant__license_holder__gender=1).count()
            event_women_dns = event.get_results().filter(
                status=Result.cDNS,
                participant__license_holder__gender=1).count()

            event_data = {
                'name': event.name,
                'attendees': attendee_data,
                'attendees_men': sum(1 for p in attendee_data if p[0] == 0),
                'attendees_women': sum(1 for p in attendee_data if p[0] == 1),
                'participants_men': sum(1 for p in participant_data if p[0] == 0),
                'participants_women': sum(1 for p in participant_data if p[0] == 1),
                'prereg_participants_men': sum(1 for p in prereg_participant_data if p[0] == 0),
                'prereg_participants_women': sum(1 for p in prereg_participant_data if p[0] == 1),
                'men_dnf': event_men_dnf,
                'men_dns': event_men_dns,
                'women_dnf': event_women_dnf,
                'women_dns': event_women_dns,
                'total_dnf': event_men_dnf + event_women_dnf,
                'total_dns': event_men_dns + event_women_dns,
            }
            event_data['attendees_total'] = event_data[
                'attendees_men'] + event_data['attendees_women']
            event_data['participants_total'] = event_data[
                'participants_men'] + event_data['participants_women']
            event_data['prereg_participants_total'] = event_data[
                'prereg_participants_men'] + event_data[
                    'prereg_participants_women']

            competition_data['attendees_men'] += event_data['attendees_men']
            competition_data['attendees_women'] += event_data[
                'attendees_women']

            competition_data['participants_men'] += event_data[
                'participants_men']
            competition_data['participants_women'] += event_data[
                'participants_women']

            competition_data['participants_men_dnf'] += event_data['men_dnf']
            competition_data['participants_men_dns'] += event_data['men_dns']
            competition_data['participants_women_dnf'] += event_data[
                'women_dnf']
            competition_data['participants_women_dns'] += event_data[
                'women_dns']

            competition_data['prereg_participants_men'] += event_data[
                'prereg_participants_men']
            competition_data['prereg_participants_women'] += event_data[
                'prereg_participants_women']

            competition_data['events'].append(event_data)

        competition_data['attendees_total'] = competition_data[
            'attendees_men'] + competition_data['attendees_women']
        competition_data['participants_total'] = competition_data[
            'participants_men'] + competition_data['participants_women']
        competition_data['prereg_participants_total'] = competition_data[
            'prereg_participants_men'] + competition_data[
                'prereg_participants_women']
        competition_data['participants_total_dnf'] = competition_data[
            'participants_men_dnf'] + competition_data['participants_women_dnf']
        competition_data['participants_total_dns'] = competition_data[
            'participants_men_dns'] + competition_data['participants_women_dns']

        #------------------------------------------------
        if competition.seasons_pass:
            competition_data[
                'participants_paid_seasons_pass'] = Participant.objects.filter(
                    competition=competition,
                    role=Participant.Competitor,
                    paid=True,
                    license_holder__in=SeasonsPassHolder.objects.filter(
                        seasons_pass=competition.seasons_pass).values_list(
                            'license_holder', flat=True)).count()
            competition_data[
                'participants_paid_on_venue'] = Participant.objects.filter(
                    competition=competition,
                    role=Participant.Competitor,
                    paid=True).exclude(
                        license_holder__in=SeasonsPassHolder.objects.filter(
                            seasons_pass=competition.seasons_pass).values_list(
                                'license_holder', flat=True)).count()
        else:
            competition_data[
                'participants_paid_on_venue'] = Participant.objects.filter(
                    competition=competition,
                    role=Participant.Competitor,
                    paid=True).count()
        competition_data['participants_unpaid'] = Participant.objects.filter(
            competition=competition, paid=False).count()

        data.append(competition_data)

    age_range_average = [
        0 if not age_range_license_holders[i] else
        age_range_attendee_count[i] / float(len(age_range_license_holders[i]))
        for i in range(len(age_range_attendee_count))
    ]
    age_range_men_average = [
        0 if not age_range_men_license_holders[i] else
        age_range_men_attendee_count[i] /
        float(len(age_range_men_license_holders[i]))
        for i in range(len(age_range_men_attendee_count))
    ]
    age_range_women_average = [
        0 if not age_range_women_license_holders[i] else
        age_range_women_attendee_count[i] /
        float(len(age_range_women_license_holders[i]))
        for i in range(len(age_range_women_attendee_count))
    ]

    def trim_right_zeros(a):
        for i in range(len(a) - 1, -1, -1):
            if a[i]:
                del a[i + 1:]
                break

    trim_right_zeros(age_range_average)
    trim_right_zeros(age_range_men_average)
    trim_right_zeros(age_range_women_average)

    license_holder_profile = []
    license_holder_men_profile = []
    license_holder_women_profile = []
    if profile_year:
        license_holder_profile = sorted(
            fix_age(profile_year - lh.date_of_birth.year)
            for lh in license_holders_set)
        license_holder_men_profile = sorted(
            fix_age(profile_year - lh.date_of_birth.year)
            for lh in license_holders_set if lh.gender == 0)
        license_holder_women_profile = sorted(
            fix_age(profile_year - lh.date_of_birth.year)
            for lh in license_holders_set if lh.gender == 1)
    else:
        profile_year = timezone.localtime(timezone.now()).date().year

    attendees_total = sum(c['attendees_total'] for c in data)

    def format_int_percent(num, total):
        return {
            'v': num,
            'f': '{} / {} ({:.2f}%)'.format(num, total, (100.0 * num) / (total or 1)),
        }

    def format_int_percent_event(num, total, event):
        return {
            'v': num,
            'f': '{} / {} ({:.2f}%) - {}'.format(num, total, (100.0 * num) / (total or 1), event),
        }

    def format_event_int_percent(num, total, event):
        return {
            'v': num,
            'f': '{}: {} / {} ({:.2f}%)'.format(event, num, total, (100.0 * num) / (total or 1)),
        }

    def format_percent(num, total):
        percent = 100.0 * float(num) / float(total if total else 1.0)
        return {'v': percent, 'f': '{:.2f}%'.format(percent)}

    # Initialize the category total.
    category_total = [['Category', 'Total']] + sorted(
        ([k, v] for k, v in category_total_overall.items()),
        key=lambda x: x[1],
        reverse=True)
    category_total_men = [['Category', 'Total']] + [[
        re.sub(r' \(Men\)$', '', c), t
    ] for c, t in category_total[1:] if c.endswith(' (Men)')]
    category_total_women = [['Category', 'Total']] + [[
        re.sub(r' \(Women\)$', '', c), t
    ] for c, t in category_total[1:] if c.endswith(' (Women)')]
    category_total_open = [['Category', 'Total']] + [[
        re.sub(r' \(Open\)$', '', c), t
    ] for c, t in category_total[1:] if c.endswith(' (Open)')]

    #--------------
    ccc = [['Competition'] + [name for name, count in category_total[1:]]]
    for competition in sorted(category_competition_total.keys(),
                              key=operator.attrgetter('start_date')):
        ccc.append([competition.name] + [
            format_int_percent_event(
                category_competition_total[competition].get(name, 0),
                competition_participants_total[competition],
                competition_category_event.get(competition, {}).get(name, ''),
            )
            if category_competition_total[competition].get(name, 0) != 0 else 0
            for name, count in category_total[1:]
        ])

    # Add cumulative percent to the category total.
    for ct in [
            category_total, category_total_men, category_total_women,
            category_total_open
    ]:
        ct[0].append('Cumulative %')
        ct_total = sum(t for c, t in ct[1:])
        cumulativePercent = 0.0
        for c in ct[1:]:
            cumulativePercent += 100.0 * c[-1] / ct_total
            c.append(cumulativePercent)

    event_max = (max(len(events) for events in event_competition_participants_total.values())
                 if event_competition_participants_total else 0)
    eee = [['Competition'] + ['{}'.format(i + 1) for i in range(event_max)]]
    for competition in sorted((event_competition_participants_total.keys()),
                              key=operator.attrgetter('start_date')):
        events = sorted(
            ((event, count) for event, count in
             event_competition_participants_total[competition].items()),
            key=lambda x: x[0].date_time)
        eee.append([competition.name] + [
            format_event_int_percent(
                events[i][1],
                competition_participants_total[competition],
                events[i][0].name,
            ) if i < len(events) else 0 for i in range(event_max)
        ])

    def get_expected_age(ac):
        if not ac:
            return None
        most_frequent = max(v for v in ac.values())
        for a, c in ac.items():
            if c == most_frequent:
                return a
        return None

    # Create a postal code hierarchy.
    postal_codes = defaultdict(int)
    for lh in license_holders_attendance_total.keys():
        key = 'Unknown' if not lh.zip_postal else lh.zip_postal.replace(' ', '')[:4]
        postal_codes[key] += 1
    postal_code_data = [
        ['/All/' + ('Unknown' if p == 'Unknown' else '/'.join(p[:i] for i in range(1, len(p) + 1))),
         total]
        for p, total in postal_codes.items()
    ]

    #-----------------------------------------------
    # Discipline data.
    #
    def safe_union(*args):
        return set.union(*args) if args else set()

    discipline_total = len(safe_union(*discipline_overall.values()))
    discipline_men_total = len(safe_union(*discipline_men.values()))
    discipline_women_total = len(safe_union(*discipline_women.values()))

    discipline_used = list(discipline_overall.keys())
    discipline_used.sort(key=lambda d: len(discipline_overall[d]),
                         reverse=True)

    discipline_overall = [[
        d, format_percent(len(discipline_overall[d]), discipline_total)
    ] for d in discipline_used]
    discipline_overall.insert(0, ['Discipline', 'All License Holders'])
    discipline_gender = [[
        d,
        format_percent(len(discipline_men.get(d, set())),
                       discipline_men_total),
        format_percent(len(discipline_women.get(d, set())),
                       discipline_women_total)
    ] for d in discipline_used]
    discipline_gender.insert(0, ['Discipline', 'Men', 'Women'])

    buckets_used = safe_union(
        *[set(discipline_bucket[d].keys()) for d in discipline_used])
    bucket_min = min(buckets_used) if buckets_used else 0
    bucket_max = max(buckets_used) + 1 if buckets_used else 0
    discipline_bucket_total = {
        b: len(
            safe_union(
                *[discipline_bucket[d].get(b, set())
                  for d in discipline_used]))
        for b in range(bucket_min, bucket_max)
    }

    discipline_age = [[d] + [
        format_percent(len(discipline_bucket[d].get(b, set())),
                       discipline_bucket_total.get(b, 0))
        for b in range(bucket_min, bucket_max)
    ] for d in discipline_used]
    discipline_age.insert(0, ['Discipline'] + [
        '{}-{}'.format(b * age_increment, (b + 1) * age_increment - 1)
        for b in range(bucket_min, bucket_max)
    ])

    #-----------------------------------------------
    # Average/Max Category
    #
    def getCategoryAverageMax():
        cct = defaultdict(lambda: defaultdict(int))
        for competition, category_name_total in category_competition_total.items():
            for category_name, total in category_name_total.items():
                cct[category_name][competition] = total
        category_average_max = [[
            category_name,
            sum(competition_total.values()) / float(len(competition_total)),
            max(competition_total.values()),
        ] for category_name, competition_total in cct.items()]
        category_average_max.sort(key=operator.itemgetter(2), reverse=True)
        #category_average_max.insert( 0, ['Category', 'Ave', 'Max'] )
        return category_average_max

    category_average_max = getCategoryAverageMax()

    payload = {
        'competitions_total': competitions_total,
        'events_total': events_total,
        'attendees_total': attendees_total,
        'attendees_men_total': sum(c['attendees_men'] for c in data),
        'attendees_women_total': sum(c['attendees_women'] for c in data),
        'participants_total': participants_total,
        'prereg_participants_total': prereg_participants_total,
        'license_holders_attendance_total': len(license_holders_attendance_total),
        'license_holders_men_total': len(license_holders_men_total),
        'license_holders_women_total': len(license_holders_women_total),
        'attendance_average': sum(v for v in license_holders_attendance_total.values()) /
            (float(len(license_holders_attendance_total)) or 1),
        'attendance_men_average': sum(v for v in license_holders_men_total.values()) /
            (float(len(license_holders_men_total)) or 1),
        'attendance_women_average': sum(v for v in license_holders_women_total.values()) /
            (float(len(license_holders_women_total)) or 1),
        'age_range_average': age_range_average,
        'age_range_men_average': age_range_men_average,
        'age_range_women_average': age_range_women_average,
        'age_increment': age_increment,
        'profile_year': profile_year,
        'license_holder_profile': license_holder_profile,
        'license_holder_men_profile': license_holder_men_profile,
        'license_holder_women_profile': license_holder_women_profile,
        'category_total': category_total,
        'category_total_men': category_total_men,
        'category_total_women': category_total_women,
        'category_total_open': category_total_open,
        'category_competition_count': ccc,
        'event_competition_count': eee,
        'postal_code_data': postal_code_data,
        'postal_codes': [[k, v] for k, v in postal_codes.items() if k != 'Unknown'],
        'competitions': data,
        'discipline_total': discipline_total,
        'discipline_overall': discipline_overall,
        'discipline_gender': discipline_gender,
        'discipline_age': discipline_age,
        'discipline_used_len': len(discipline_used),
        'category_average_max': category_average_max,
    }
    return payload, sorted(
        license_holders_event_errors,
        key=lambda x: (x[1].date_time, x[0].date_of_birth)
    ), competitions.order_by('-start_date')
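participation_data leans heavily on nested defaultdicts for the per-competition tallies; the pattern in isolation (a sketch with made-up names):

from collections import defaultdict

totals = defaultdict(lambda: defaultdict(int))
totals['Spring GP']['Elite (Men)'] += 1
totals['Spring GP']['Elite (Men)'] += 1
print(totals['Spring GP']['Elite (Men)'])  # 2
print(totals['Fall GP']['Junior'])         # 0, auto-created on first access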
Esempio n. 58
0
 def aggregate(cls, stats):
     return SummaryStatistics(*map(sum, [
         map(attrgetter(attr), stats)
         for attr in ("num_matches", "num_only_guessed", "num_only_ref")
     ]))
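The aggregate above sums each statistic column-wise across objects; the same computation unrolled (a sketch; SimpleNamespace stands in for the real stats objects):

from operator import attrgetter
from types import SimpleNamespace

stats = [SimpleNamespace(num_matches=2, num_only_guessed=1, num_only_ref=0),
         SimpleNamespace(num_matches=3, num_only_guessed=0, num_only_ref=4)]
totals = [sum(map(attrgetter(attr), stats))
          for attr in ('num_matches', 'num_only_guessed', 'num_only_ref')]
print(totals)  # [5, 1, 4]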
Esempio n. 59
0
def main():
    import argparse
    parser = argparse.ArgumentParser(
        description="Create callgraph profile from log file",
        usage="%(prog)s [Options] LOGFILE [LOGFILE]")

    arg = parser.add_argument
    arg('files', nargs='*', help='list of files to process')

    arg("-m",
        "--min",
        action="store",
        type="float",
        help="only show entries with VMem>MIN in kB")

    arg("-d",
        "--diff",
        action="store_true",
        help="show difference (only for two files)")

    arg("-s",
        "--self",
        action="store_true",
        help="use self instead of inclusive VMem for sorting/filtering")

    arg("-l",
        "--libself",
        action="store_true",
        help="include libraries into self-VMem")

    arg("-c",
        "--slice",
        action="store",
        default="ini",
        help="slice to analyze [ini]")

    arg("-f", "--flat", action="store_true", help="do not indent tree")

    global opt
    opt = parser.parse_args()

    if len(opt.files) == 0:
        parser.print_help()
        return 1

    if opt.diff and len(opt.files) != 2:
        print("Can only calculate difference if two files are given")
        return 1

    slices = [opt.slice]
    if opt.slice == 'ini':
        slices += ['cbk', 'dso']

    # evt slice
    if opt.slice == 'evt':
        table = []
        for f in opt.files:
            table.append(calcEventAvg(readEvents(open(f, 'r')))[:])

        #diff(table, opt, operator.attrgetter('nmalloc'))
        printTable(table[0], opt)
        return 0

    # Read files
    resTreeList = []
    for f in opt.files:
        l = []
        fstream = pmon_ser.extract_pmon_files(f)['data']
        getResUser(fstream, l, slices)
        del fstream
        resTreeList.append(l[:])

        # Calculate self-VMem
        if not opt.libself: children = [SharedLib]
        else: children = None
        for r in resTreeList[-1]:
            r.calcSelf(children)

    # Diff
    if len(opt.files) > 1:

        print('#' * 80)
        for i, f in enumerate(opt.files):
            print("# [%d] %s" % (i + 1, f))
        if opt.diff:
            print("# [3] difference [2]-[1]")
        print('#' * 80)

        table = [getCompList(t, []) for t in resTreeList]
        if opt.self:
            diff(table, opt, operator.attrgetter('dvmem_self'))
        else:
            diff(table, opt)
        return 0

    # Only one file
    resTree = resTreeList[0]

    if opt.min is not None:
        # Use VMem or self-VMem for filtering
        vmem = lambda c: c.dvmem_self if (opt.self and hasattr(c, 'dvmem_self')) else c.dvmem
        for r in resTree:
            r.show(lambda c: vmem(c) > opt.min)
    else:
        for r in resTree:
            r.show()

    return 0
Esempio n. 60
0
def GreedyEdgeSelection(V, E, maxVperTree=-1):  #TODO abort after maxVperTree

    colors = [-1] * (V + 1)
    #Sort Edges by weight
    #print "Sorting"
    import operator
    E.sort(key=operator.attrgetter('weight'), reverse=True)
    #print E

    #Initialize
    unusedColor = 0
    T = []  #indexed by color

    #Select
    for e in E:
        #print "Current colors", colors
        #print "Current trees", T
        #print "*" * 10
        #print "Trying to add", e
        Vi = colors[e.i]
        Vj = colors[e.j]
        if (Vi == -1) and (Vj == -1):
            #Assume maxVperTree >= 2; no need to check tree size here
            colors[e.i] = unusedColor
            colors[e.j] = unusedColor
            T.append([e.i, e.j])
            #print "Added", e
            unusedColor += 1
            continue
        if (Vi == -1):
            if Vj not in getOtherNeighborColors([e.i], e, E, colors):
                if (maxVperTree == -1) or (len(T[Vj]) < maxVperTree):
                    colors[e.i] = Vj
                    T[Vj].append(e.i)
                    #print "Added", e
            continue
        if (Vj == -1):
            if Vi not in getOtherNeighborColors([e.j], e, E, colors):
                if (maxVperTree == -1) or (len(T[Vi]) < maxVperTree):
                    colors[e.j] = Vi
                    T[Vi].append(e.j)
                    #print "Added", e
            continue
        #Now neither Vi nor Vj is -1
        #They must not be the same, or we have screwed up
        if Vi == Vj:
            print("Error!")
            print(e.i, Vi)
            print(e.j, Vj)
            import sys
            sys.exit()

        if Vi not in getOtherNeighborColors(T[Vj], e, E, colors):
            if (maxVperTree == -1) or (len(T[Vi]) + len(T[Vj]) <= maxVperTree):
                for k in T[Vj]:
                    colors[k] = Vi
                T[Vi].extend(T[Vj])
                T[Vj] = []
                #print "Added", e

    #Add disconnected nodes
    for i in range(1, V + 1):
        if colors[i] == -1:
            T.append([i])

    return T