Code Example #1
File: unitedit.py  Project: deactivated/pcbre
class UnitGroup(object):
    def __init__(self, items, default_index=0):
        self.units = OrderedDict(items)
        self.by_name = {name: i for i, name in enumerate(self.units.keys())}
        self.by_scalefactor = {scale: i for i, scale in enumerate(self.units.values())}
        self.__default_index = default_index

    def idx_by_name(self, name):
        return self.by_name[name]

    def idx_by_scale(self, scale):
        return self.by_scalefactor[scale]

    def get_scale(self, idx):
        return list(self.units.values())[idx]

    def get_name(self, idx):
        return list(self.units.keys())[idx]

    @property
    def names(self):
        return list(self.units.keys())

    @property
    def default_index(self):
        return self.__default_index
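
A minimal usage sketch (the unit names and scale factors here are hypothetical; pcbre's real tables may differ):

units = UnitGroup([("mm", 1000000), ("mil", 25400)], default_index=0)
assert units.idx_by_name("mil") == 1
assert units.get_scale(0) == 1000000
assert units.names == ["mm", "mil"]
assert units.default_index == 0
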
Code Example #2
File: reader.py  Project: karolmajta/epicjs-api
def parse_meditation(lines):
    lines = [l for l in lines if l.strip()]
    title_line = lines[0]
    rest = lines[1:]
    slug, title = title_line.replace("//", "").strip().split("|")
    koans = OrderedDict()
    current = None
    comment_bracket_cnt = 0
    for line in rest:
        if line.strip().startswith("//"):
            current = line.replace("//", "").strip()
            koans[current] = {"hint": [], "code": [], "slug": current.split("|")[0], "name": current.split("|")[1]}
        elif line.strip().startswith("/*"):
            koans[current]["hint"].append(line)
            comment_bracket_cnt += 1
        elif line.strip().startswith("*/"):
            koans[current]["hint"].append(line)
            comment_bracket_cnt -= 1
        else:
            if comment_bracket_cnt != 0:
                koans[current]["hint"].append(line)
            else:
                koans[current]["code"].append(line)

    for koan in koans.values():
        koan["hint"] = "".join(koan["hint"])
        koan["code"] = "".join(koan["code"])

    ks = [Koan(k["slug"], k["name"], k["hint"], k["code"]) for k in koans.values()]
    m = Meditation(slug, title)
    for k in ks:
        m.add_koan(k)
    return m
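
The format implied by the parser: the first non-blank line is a "// slug|title" header, each subsequent "// slug|name" line opens a koan, /* ... */ blocks become hints, and everything else is code. A hypothetical input (Koan and Meditation are defined elsewhere in the same module):

source = '''
// zen-basics|Zen Basics
// k1|First Koan
/* make the assertion pass
*/
assert(1 + 1 === 2);
'''.splitlines(keepends=True)
meditation = parse_meditation(source)
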
Code Example #3
def reduce_with_semi_intelligent_agents(g):
    #randomly assign coordinates
    coordinates_as_dict = OrderedDict()
    for vertex in g.vs:
        node_id = vertex['name']
        x = random.random() * 100
        y = random.random() * 100
        coordinates_as_dict[node_id] = (x, y)
    coordinates_as_numpy_array = np.array(list(coordinates_as_dict.values()))

    for i in range(40):
        print(i)

        coordinates_as_dict, avg_distance_to_connected_nodes = figure_out_everyones_next_move(
            i, g, coordinates_as_dict, coordinates_as_numpy_array)

        #do this before we overwrite coordinates_as_numpy_array
        #doing this to save computation (this way we can use 
        #figure_out_everyones_next_move to get avg_distance_to_connected_nodes)
        how_are_we_doing_compared_to_our_objective(avg_distance_to_connected_nodes, 
            coordinates_as_numpy_array)

        coordinates_as_numpy_array = np.array(list(coordinates_as_dict.values()))

    return coordinates_as_numpy_array
Code Example #4
def test():
    from collections import OrderedDict as StdlibOrderedDict

    ordered_dict = OrderedDict(((1, 'a'), (2, 'b'), (3, 'c')))
    stdlib_ordered_dict = StdlibOrderedDict(((1, 'a'), (2, 'b'), (3, 'c')))
    
    assert ordered_dict == stdlib_ordered_dict
    assert stdlib_ordered_dict == ordered_dict
    assert ordered_dict.items() == stdlib_ordered_dict.items()
    assert ordered_dict.keys() == stdlib_ordered_dict.keys()
    assert ordered_dict.values() == stdlib_ordered_dict.values()
    
    ordered_dict.move_to_end(1)
    
    assert ordered_dict != stdlib_ordered_dict
    #assert stdlib_ordered_dict != ordered_dict
    assert ordered_dict.items() != stdlib_ordered_dict.items()
    assert ordered_dict.keys() != stdlib_ordered_dict.keys()
    assert ordered_dict.values() != stdlib_ordered_dict.values()
    
    del stdlib_ordered_dict[1]
    stdlib_ordered_dict[1] = 'a'
    
    assert ordered_dict == stdlib_ordered_dict
    assert stdlib_ordered_dict == ordered_dict
    assert ordered_dict.items() == stdlib_ordered_dict.items()
    assert ordered_dict.keys() == stdlib_ordered_dict.keys()
    assert ordered_dict.values() == stdlib_ordered_dict.values()
    
    assert ordered_dict == OrderedDict(stdlib_ordered_dict) == \
                                                            stdlib_ordered_dict
    assert ordered_dict == StdlibOrderedDict(ordered_dict) == \
                                                            stdlib_ordered_dict
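
The behavior being tested matches the stdlib contract: equality between two OrderedDicts is order-sensitive, while plain-dict comparison ignores order. A stdlib-only demonstration:

from collections import OrderedDict

a = OrderedDict([(1, 'a'), (2, 'b'), (3, 'c')])
b = OrderedDict([(3, 'c'), (2, 'b'), (1, 'a')])
assert a != b              # same items, different order
assert dict(a) == dict(b)  # plain dicts ignore order
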
    
Code Example #5
File: mywidgets.py  Project: bsherin/shared_tools
class CheckGroupNoParameters(QGroupBox):
    def __init__(self, group_name, name_list, default=None, help_instance=None, handler=None, help_dict=None):
        QGroupBox.__init__(self, group_name)
        self.handler = handler
        self.help_dict = help_dict
        self.help_instance = help_instance

        self.the_layout = QVBoxLayout()
        self.the_layout.setSpacing(5)
        self.the_layout.setContentsMargins(1, 1, 1, 1)
        self.setLayout(self.the_layout)
        self.widget_dict = OrderedDict([])
        self.is_popup = False
        self.create_check_boxes(name_list)
        if default is not None:
            self.set_myvalue([default])
        return
    
    def reset(self):
        for cb in self.widget_dict.values():
            cb.setChecked(False)

    def create_check_boxes(self, name_list):
        for txt in name_list:
            qh = QHBoxLayout()
            cb = QCheckBox(txt)
            cb.setFont(regular_small_font)
            qh.addWidget(cb)
            qh.addStretch()
            self.the_layout.addLayout(qh)
            if self.handler is not None:
                cb.toggled.connect(self.handler)
            self.widget_dict[txt] = cb
            if (self.help_dict is not None) and (self.help_instance is not None):
                if txt in self.help_dict:
                    help_button_widget = self.help_instance.create_button(txt, self.help_dict[txt])
                    qh.addWidget(help_button_widget)

    def recreate_check_boxes(self, new_name_list):
        for cb in self.widget_dict.values():
            cb.hide()
            cb.deleteLater()
            del cb
        self.widget_dict = {}
        self.create_check_boxes(new_name_list)
    
    # returns a list where each item is [name, parameter value]
    def get_myvalue(self):
        result = []
        for (fe, val) in self.widget_dict.items():
            if val.isChecked():
                result.append(fe)
        return result
    
    # Takes a lists where each item is [name, parameter value]
    def set_myvalue(self, true_items):
        self.reset()
        for fe in true_items:
            self.widget_dict[fe].setChecked(True)
    value = property(get_myvalue, set_myvalue)
Code Example #6
File: plot.py  Project: DanlanChen/blocks-extras
def print_column_summary(experiments):
    """Print a list of all columns contained in the given experiments.

    Parameters
    ----------
    experiments : OrderedDict of {str: DataFrame}
        The key is expected to be an experiment identifier
        (e.g. a filename) and the value a pandas.DataFrame.

    """
    channels_per_experiment = OrderedDict(
        [(fname, set(df.columns)) for fname, df in iteritems(experiments)]
    )
    all_channels = reduce(set.union, channels_per_experiment.values())

    print("{} experiment(s):".format(len(experiments)))
    for i, fname in enumerate(experiments):
        print("    {}: {}".format(i, fname))
    print()
    print("Containing the following channels:")
    for ch in sorted(all_channels):
        # create a string indicating which experiments contain which
        #  channels
        indicator = []
        for i, channels in enumerate(channels_per_experiment.values()):
            if ch in channels:
                indicator.append(str(i))
            else:
                indicator.append(" ")
        indicator = ",".join(indicator)
        print("    {}: {}".format(indicator, ch))
Code Example #7
class Counter:
    """
    Remembers the order in which it counted something
    """

    def __init__(self, startat=0):
        self._ordered = OrderedDict()
        self.startat = startat

    def add(self, key, step=1):
        if key not in self._ordered:
            self._ordered[key] = self.startat
        self._ordered[key] += step

    @property
    def maximum_in_order_added(self):
        """
        Returns a list of the highest-frequency items
        in the order in which they were added
        """
        if self._ordered.values():
            maximum = max(self._ordered.values())
        else:
            return []
        r = []
        for item in self._ordered:
            if self._ordered[item] == maximum and item not in r:
                r.append(item)
        return r
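
A usage sketch (assumes collections.OrderedDict is imported where Counter is defined):

c = Counter()
for word in ["b", "a", "b", "a", "c"]:
    c.add(word)
assert c.maximum_in_order_added == ["b", "a"]  # both reach the maximum count of 2; "b" was added first
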
Code Example #8
	def barPlot(self, datalist, threshold, figname):

		tally = self.geneCount(datalist)

		#Limit the items plotted to those over 1% of the read mass
		geneplot = defaultdict()
		for g, n in tally.items():
			if n > int(sum(tally.values())*threshold):
				geneplot[g] = n

		#Get plotting values
		olist = OrderedDict(sorted(geneplot.items(),key=lambda t: t[0]))
		summe = sum(olist.values())
		freq = [float(x)/float(summe) for x in olist.values()]
		
		#Create plot
		fig = plt.figure()
		width = .35
		ind = np.arange(len(geneplot.keys()))
		plt.bar(ind, freq)
		plt.xticks(ind + width, geneplot.keys())
		locs, labels = plt.xticks() 
		plt.setp(labels, rotation=90)
		plt.show()

		fig.savefig(figname)
		print("Saved bar plot as: "+figname)
Code Example #9
File: query.py  Project: amjadm61/ichnaea
    def cell(self, values):
        if not values:
            values = []
        values = list(values)
        self._cell_unvalidated = values

        filtered_areas = OrderedDict()
        filtered_cells = OrderedDict()
        for value in values:
            valid_area = CellAreaLookup.create(**value)
            if valid_area:
                areaid = valid_area.areaid
                existing = filtered_areas.get(areaid)
                if existing is not None and existing.better(valid_area):
                    pass
                else:
                    filtered_areas[areaid] = valid_area
            valid_cell = CellLookup.create(**value)
            if valid_cell:
                cellid = valid_cell.cellid
                existing = filtered_cells.get(cellid)
                if existing is not None and existing.better(valid_cell):
                    pass
                else:
                    filtered_cells[cellid] = valid_cell
        self._cell_area = list(filtered_areas.values())
        self._cell = list(filtered_cells.values())
Code Example #10
File: base.py  Project: vivsh/django-ginger
class FormattedTableColumnSet(object):
    def __init__(self, table, values):
        self.columns = OrderedDict((n, FormattedTableColumn(n, p, table)) for n, p in values)

    def visible_columns(self):
        return [col for col in self.columns.values() if not col.hidden]

    def hidden_columns(self):
        return [col for col in self.columns.values() if col.hidden]

    def keys(self):
        return self.columns.keys()

    def __iter__(self):
        for value in self.columns.values():
            yield value

    def __contains__(self, item):
        return item in self.columns

    def __getitem__(self, item):
        return self.columns[item]

    def __getattr__(self, item):
        try:
            return self.columns[item]
        except KeyError:
            raise AttributeError(item)

    def __len__(self):
        return len(self.columns)

    def __bool__(self):
        return len(self) > 0
Code Example #11
File: ui.py  Project: rowhit/meson
 def _pkgconfig_detect(self, mods, kwargs):
     # We set the value of required to False so that we can try the
     # qmake-based fallback if pkg-config fails.
     kwargs['required'] = False
     modules = OrderedDict()
     for module in mods:
         modules[module] = PkgConfigDependency(self.qtpkgname + module, self.env,
                                               kwargs, language=self.language)
     for m in modules.values():
         if not m.found():
             self.is_found = False
             return
         self.compile_args += m.get_compile_args()
         self.link_args += m.get_link_args()
     self.is_found = True
     self.version = m.version
     self.pcdep = list(modules.values())
     # Try to detect moc, uic, rcc
     if 'Core' in modules:
         core = modules['Core']
     else:
         corekwargs = {'required': 'false', 'silent': 'true'}
         core = PkgConfigDependency(self.qtpkgname + 'Core', self.env, corekwargs,
                                    language=self.language)
         self.pcdep.append(core)
     # Used by self.compilers_detect()
     self.bindir = self.get_pkgconfig_host_bins(core)
     if not self.bindir:
         # If exec_prefix is not defined, the pkg-config file is broken
         prefix = core.get_pkgconfig_variable('exec_prefix', {})
         if prefix:
             self.bindir = os.path.join(prefix, 'bin')
Code Example #12
File: crossbar.py  Project: enjoy-digital/liteusb
class LiteUSBCrossbar(Module):
    def __init__(self):
        self.users = OrderedDict()
        self.master = LiteUSBMasterPort(8)
        self.dispatch_param = "dst"

    def get_port(self, dst):
        port = LiteUSBUserPort(8, dst)
        if dst in self.users.keys():
            raise ValueError("Destination {0:#x} already assigned".format(dst))
        self.users[dst] = port
        return port

    def do_finalize(self):
        # TX arbitrate
        sinks = [port.sink for port in self.users.values()]
        self.submodules.arbiter = Arbiter(sinks, self.master.source)

        # RX dispatch
        sources = [port.source for port in self.users.values()]
        self.submodules.dispatcher = Dispatcher(self.master.sink,
                                                sources,
                                                one_hot=True)
        cases = {}
        cases["default"] = self.dispatcher.sel.eq(0)
        for i, (k, v) in enumerate(self.users.items()):
            cases[k] = self.dispatcher.sel.eq(2**i)
        self.comb += \
            Case(getattr(self.master.sink, self.dispatch_param), cases)
Code Example #13
File: printer.py  Project: rgeoghegan/jiraedit
class JiraPrinter:
    def __init__(self, serialized):
        self.serialized = serialized
        self.fields = OrderedDict()

        for field in self.field_definitions():
            field.bind(self.serialized)
            self.fields[field.name] = field

    def field_definitions(self):
        return [
            JiraSummaryField('summary'),
            JiraField('Assignee', 'Assignee'),
            JiraDescription('Description', 'Description'),
        ]

    def __str__(self):
        output = []
        for field in self.fields.values():
            output.append(str(field))
        return "\n".join(output)

    def parse(self, stream):
        lines = LineBuffer(stream)

        output = OrderedDict()
        output['key'] = self.serialized['key']

        for field in self.fields.values():
            output[field.name] = field.parse(lines)

        return output
Code Example #14
File: idr.py  Project: Simon-Coetzee/idr
def iter_merge_grpd_intervals(
        intervals, n_samples, pk_agg_fn,
        use_oracle_pks, use_nonoverlapping_peaks):
    # grp peaks by their source, and calculate the merged
    # peak boundaries
    grpd_peaks = OrderedDict([(i+1, []) for i in range(n_samples)])
    pk_start, pk_stop = 1e12, -1
    for interval, sample_id in intervals:
        # if we've provided a unified peak set, ignore any intervals that 
        # don't contain it for the purposes of generating the merged list
        if (not use_oracle_pks) or sample_id == 0:
            pk_start = min(interval.start, pk_start)
            pk_stop = max(interval.stop, pk_stop)
        # if this is an actual sample (ie not a merged peaks)
        if sample_id > 0:
            grpd_peaks[sample_id].append(interval)
    
    # if there are no identified peaks, continue (this can happen if 
    # we have a merged peak list but no merged peaks overlap sample peaks)
    if pk_stop == -1:
        return None

    # skip regions that dont have a peak in all replicates
    if not use_nonoverlapping_peaks:
        if any(0 == len(peaks) for peaks in grpd_peaks.values()):
            return None

    # find the merged peak summit
    # note that we can iterate through the values because 
    # grpd_peaks is an ordered dict
    replicate_summits = []
    for sample_id, pks in grpd_peaks.items():
        # if an oracle peak set is specified, skip the replicates
        if use_oracle_pks and sample_id != 0: 
            continue

        # initialize the summit to the first peak
        try: replicate_summit, summit_signal = pks[0].summit, pks[0].signal
        except IndexError: replicate_summit, summit_signal =  None, -1e9
        # if there are more peaks, take the summit that corresponds to the 
        # replicate peak with the highest signal value
        for pk in pks[1:]:
            if pk.summit is not None and pk.signal > summit_signal:
                replicate_summit, summit_signal = pk.summit, pk.signal
        # make sure a peak summit was specified
        if replicate_summit is not None:
            replicate_summits.append( replicate_summit )

    summit = ( int(mean(replicate_summits)) 
               if len(replicate_summits) > 0 else None )

    # note that we can iterate through the values because 
    # grpd_peaks is an ordered dict
    signals = [pk_agg_fn(pk.signal for pk in pks) if len(pks) > 0 else 0
              for pks in grpd_peaks.values()]
    merged_pk = (pk_start, pk_stop, summit, 
                 pk_agg_fn(signals), signals, grpd_peaks)

    yield merged_pk
    return
Code Example #15
File: base.py  Project: tkaemming/dht
class Cluster(object):
    def __init__(self, members):
        self.hash = hash
        self.members = OrderedDict(((self.hash(node), node) for node in members))

    def __len__(self):
        return sum((len(node) for node in self.members.values()))

    def __getitem__(self, key):
        return self.location(key)[key]

    def __setitem__(self, key, value):
        self.location(key)[key] = value

    def __delitem__(self, key):
        del self.location(key)[key]

    def location(self, key):
        """
        Returns where a given key should be stored.
        """
        hashed = self.hash(key)
        try:
            return last(takewhile(lambda pair: pair[0] <= hashed,
                self.members.items()))[1]
        except ValueError:
            # "wrap around" the ring of nodes to the last node if no nodes
            # have a hashed value that is lower than or equal to the hashed
            # value of the key
            return list(self.members.values())[-1]
Code Example #16
File: menu.py  Project: agronholm/websauna
class Menu:
    """Menu is a collection of items in a Bootstrap pull-down menu.

    It allow mechanism for third party packages to register their own entries.
    """

    template = "admin/menu/menu.html"

    def __init__(self):
        self.entries = OrderedDict()

    # TODO: How to define entry type here to be Entry (circular)
    def add_entry(self, entry):
        self.entries[entry.id] = entry

    def has_items(self, request: Request) -> bool:
        """Has this menu any entries to draw."""
        return any(entry.is_enabled(request) for entry in self.entries.values())

    def get_entries(self) -> List:
        """Get Entry objects to be rendered in this menu."""
        return list(self.entries.values())

    def get_entry(self, id):
        """Get any of registered menu entries by its id."""
        return self.entries[id]
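
A usage sketch with a stand-in entry object (websauna's real Entry class carries more state, such as a caption and an enabled-condition callback; the module's imports of Request, List, and OrderedDict are assumed in place):

class FakeEntry:
    def __init__(self, id):
        self.id = id
    def is_enabled(self, request):
        return True

menu = Menu()
menu.add_entry(FakeEntry("admin-home"))
assert menu.get_entry("admin-home").id == "admin-home"
assert menu.has_items(request=None)
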
Code Example #17
def fixgridsearch(hparamfile,generate):

    hparams = OrderedDict()
    dhparams = OrderedDict()

    for hparam in HparamReader(hparamfile):

        if "generate" not in hparam or hparam["generate"] in ["default",""]:
            if hparam["generate"]=="":
                print "*** Warning ***"
                print "    Hyperparameter",hparam["hparam"]
                print "    Please set generation mode : default"

            hparam["generate"] = generate

        dhparams[hparam['hparam']] = hparam.pop("default")

        name = hparam.pop("hparam")
        hparams[name] = hparams.get(name,[]) + list(make_hparams(**hparam))

    values = np.zeros((sum([len(hparam) for hparam in hparams.values()]),len(hparams.keys())))

    j = 0
    for i, hparam in enumerate(hparams.items()):
        # set all default values
        values[j:j+len(hparam[1])] = np.array(list(dhparams.values()))
        # set the value of the current hyper-parameter
        values[j:j+len(hparam[1]),i] = np.array(hparam[1])

        j += len(hparam[1])

    return list(hparams.keys()), values
Code Example #18
File: api.py  Project: eads/elex
    def get_uniques(self, candidate_reporting_units):
        """
        Parses out unique candidates and ballot measures
        from a list of CandidateReportingUnit objects.
        """
        unique_candidates = OrderedDict()
        unique_ballot_measures = OrderedDict()

        for c in candidate_reporting_units:
            if c.is_ballot_measure:
                if not unique_ballot_measures.get(c.candidateid, None):
                    unique_ballot_measures[c.candidateid] = BallotMeasure(
                                                                last=c.last,
                                                                candidateid=c.candidateid,
                                                                polid=c.polid,
                                                                ballotorder=c.ballotorder,
                                                                polnum=c.polnum,
                                                                seatname=c.seatname,
                                                                description=c.description)
            else:
                if not unique_candidates.get(c.candidateid, None):
                    unique_candidates[c.candidateid] = Candidate(
                                                                first=c.first,
                                                                last=c.last,
                                                                candidateid=c.candidateid,
                                                                polid=c.polid,
                                                                ballotorder=c.ballotorder,
                                                                polnum=c.polnum,
                                                                party=c.party)

        candidates = [v for v in unique_candidates.values()]
        ballot_measures = [v for v in unique_ballot_measures.values()]
        return candidates, ballot_measures 
Code Example #19
    def update_data_to_contain_top10_words(self, tagged_data, top10):
        """
        Takes in a data set and removes the occurances of all words except top 10 entropy words
        :param tagged_data tagged dataset
        :param top10 top 10 highest entropy words
        :return updated data set
        """
        sentence_index = range(len(tagged_data))
        indexed_sentences = OrderedDict(zip(sentence_index,tagged_data))
        for sentence_id, sentence in indexed_sentences.items():
            word_index = range(len(tagged_data[sentence_id]))
            indexed_words = OrderedDict(zip(word_index,tagged_data[sentence_id]))
            for word_id, tagged_word in list(indexed_words.items()):
                if tagged_word[0] not in top10:
                    del indexed_words[word_id]
            indexed_sentences[sentence_id] = list(indexed_words.values())

        sentence_index = range(len(tagged_data))
        updated_tagged_data = list(indexed_sentences.values())
        indexed_sentences = OrderedDict(zip(sentence_index, updated_tagged_data))
        for sentence_id, sentence in list(indexed_sentences.items()):
            if not sentence:
                del indexed_sentences[sentence_id]
        updated_tagged_data = list(indexed_sentences.values())
        return updated_tagged_data
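
A toy walk-through (tagged_data is a list of sentences, each a list of (word, tag) pairs; top10 is any container of the words to keep):

data = [[("the", "DT"), ("cat", "NN")], [("a", "DT")]]
top10 = {"cat"}
# update_data_to_contain_top10_words(data, top10) returns
# [[("cat", "NN")]] -- non-top10 words removed, then empty sentences dropped
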
Code Example #20
File: utils.py  Project: avsd/django-allauth
def cleanup_email_addresses(request, addresses):
    """
    Takes a list of EmailAddress instances and cleans it up, making
    sure only valid ones remain, without multiple primaries etc.

    Order is important: e.g. if multiple primary e-mail addresses
    exist, the first one encountered will be kept as primary.
    """
    from .models import EmailAddress
    adapter = get_adapter()
    # Let's group by `email`
    e2a = OrderedDict()  # maps email to EmailAddress
    primary_addresses = []
    verified_addresses = []
    primary_verified_addresses = []
    for address in addresses:
        # Pick up only valid ones...
        email = valid_email_or_none(address.email)
        if not email:
            continue
        # ... and non-conflicting ones...
        if (app_settings.UNIQUE_EMAIL
                and EmailAddress.objects
                .filter(email__iexact=email)
                .exists()):
            continue
        a = e2a.get(email.lower())
        if a:
            a.primary = a.primary or address.primary
            a.verified = a.verified or address.verified
        else:
            a = address
            a.verified = a.verified or adapter.is_email_verified(request,
                                                                 a.email)
            e2a[email.lower()] = a
        if a.primary:
            primary_addresses.append(a)
            if a.verified:
                primary_verified_addresses.append(a)
        if a.verified:
            verified_addresses.append(a)
    # Now that we got things sorted out, let's assign a primary
    if primary_verified_addresses:
        primary_address = primary_verified_addresses[0]
    elif verified_addresses:
        # Pick any verified as primary
        primary_address = verified_addresses[0]
    elif primary_addresses:
        # Okay, let's pick primary then, even if unverified
        primary_address = primary_addresses[0]
    elif e2a:
        # Pick the first
        primary_address = list(e2a.values())[0]
    else:
        # Empty
        primary_address = None
    # There can only be one primary
    for a in e2a.values():
        a.primary = primary_address.email.lower() == a.email.lower()
    return list(e2a.values()), primary_address
Code Example #21
File: wiki.py  Project: 0xcd03/reddit
    def get_all(cls, return_dict=False):
        items = OrderedDict()
        try:
            wp = WikiPage.get(*cls._get_wiki_config())
        except NotFound:
            return items if return_dict else list(items.values())
        wp_content = StringIO(wp.content)
        cfg = SafeConfigParser(allow_no_value=True)
        cfg.readfp(wp_content)

        for section in cfg.sections():
            def_values = {'id': section}
            for name, value in cfg.items(section):
                # coerce boolean variables
                if name in cls._bool_values:
                    def_values[name] = cfg.getboolean(section, name)
                else:
                    def_values[name] = value

            try:
                item = cls(**def_values)
            except TypeError:
                # a required variable wasn't set for this item, skip
                continue

            if item.is_enabled:
                items[section] = item
        
        return items if return_dict else list(items.values())
Code Example #22
File: __init__.py  Project: andrewbolster/aietes
def plot_nodes(node_positions, node_links=None, radius=1.0, scalefree=False, square=True, figsize=None):
    fig, ax = plt.subplots(1, 1, figsize=figsize)
    nodes = []
    node_positions = OrderedDict(sorted(node_positions.items()))
    for node, position in node_positions.items():
        x, y, z = position
        ax.scatter(x, y)
        ax.annotate(node, xy=(x, y), xytext=(-10, 5), textcoords='offset points', ha='center', va='bottom')
        ax.add_patch(plt.Circle((x, y), radius=radius, edgecolor='b', fill=False, alpha=0.2))

    if node_links:
        for node, links in node_links.items():
            for link in links:
                positions = list(node_positions.values())
                x, y = zip(positions[node][0:2], positions[link][0:2])
                ax.plot(x, y, color='k', lw=1, alpha=1.0, linestyle=':')

    if square:
        ax.set_aspect('equal', adjustable='datalim')
    if scalefree:
        ax = format_axes(ax)
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.grid(False)

    return fig
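
A hypothetical call (node_links appears to map integer indices into the name-sorted position dict to lists of peer indices):

positions = {"a": (0.0, 0.0, 0.0), "b": (8.0, 6.0, 0.0), "c": (8.0, 0.0, 0.0)}
fig = plot_nodes(positions, node_links={0: [1, 2]}, radius=2.0)
fig.savefig("nodes.png")
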
Code Example #23
class DashboardClassManager(object):
    """
    Class to handle registered dashboards class.
    """
    _register = OrderedDict()

    def __init__(self):
        self._register = OrderedDict()

    def register(self, slug, rclass):
        if slug in self._register:
            raise ValueError('Slug already exists: %s' % slug)
        setattr(rclass, 'slug', slug)
        self._register[slug] = rclass

    def get_dashboard(self, slug):
        # return class
        print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
        print slug
        print self._register.values()
        return self._register.get(slug, None)

    def get_dashboards(self):
        # return clasess
        return self._register.values()
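
A usage sketch:

manager = DashboardClassManager()

class SalesDashboard(object):
    pass

manager.register('sales', SalesDashboard)
assert manager.get_dashboard('sales') is SalesDashboard
assert SalesDashboard.slug == 'sales'  # register() attaches the slug to the class
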
Code Example #24
def drawing_lines(start_date, end_date):

    (positive_dict, negative_dict, neutral_dict) = get_sentiment_dates(start_date, end_date)
    positive_dict = OrderedDict(sorted(positive_dict.items(), key=lambda t: t[0]))
    pos_keys = list(positive_dict.keys())[-30:]  # get the last 30 days
    pos_vals = list(positive_dict.values())[-30:]

    negative_dict = OrderedDict(sorted(negative_dict.items(), key=lambda t: t[0]))
    neg_keys = list(negative_dict.keys())[-30:]  # get the last 30 days
    neg_vals = list(negative_dict.values())[-30:]

    neutral_dict = OrderedDict(sorted(neutral_dict.items(), key=lambda t: t[0]))
    neu_keys = list(neutral_dict.keys())[-30:]  # get the last 30 days
    neu_vals = list(neutral_dict.values())[-30:]

    figure_title = 'Sentiment between ' + start_date + ' and ' + end_date
    fig, ax = plt.subplots()
    ax.plot(pos_keys, pos_vals, 'o-', label='Positive')
    ax.plot(neg_keys, neg_vals, 'o-', label='Negative')
    ax.plot(neu_keys, neu_vals, 'o-', label='Neutral')
    fig.autofmt_xdate()
    plt.legend(shadow=True, fancybox=True)
    plt.title(figure_title)
    plt.show()

    return
Code Example #25
def produce_report(request):
    '''
    Generate the CSV file for all authorised claims and save to disk..
    :param request:
    :return:
    '''
    CLAIM_TYPES = ClaimType.objects.values_list('name', flat=True).order_by('name')
    current_claim_owner = None
    csv_data_line = OrderedDict()
    def new_claim_owner(claim_owner):
        current_claim_owner = claim_owner
        csv_data_line = OrderedDict([('first_name', claim_owner.first_name), ('last_name', claim_owner.last_name),
                                    ('staff_number', claim_owner.staff_number)])
        for claim_type in CLAIM_TYPES:
            csv_data_line[claim_type] = 0
        return current_claim_owner, csv_data_line

    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="employee_claims{}.csv"'.format(time.strftime("%Y%m%d-%H%M%S"))
    writer = csv.writer(response)
    header_list = ['employee first name', 'employee last name', 'employee staff number']
    header_list += CLAIM_TYPES
    writer.writerow(header_list)
    for claim in Claim.objects.filter(senior_authorised=True, processed=False).order_by('owner'):
        if claim.owner != current_claim_owner:
            if current_claim_owner: # this is not the first time:
                writer.writerow(list(csv_data_line.values()))
            current_claim_owner, csv_data_line = new_claim_owner(claim.owner)
        csv_data_line[claim.type.name] += claim.claim_value
        claim.processed = True
        claim.save()
    if csv_data_line:
        writer.writerow(list(csv_data_line.values()))
    return response
Code Example #26
    def __pprint_project(self, tbl, group, proj, more=False):
        tbl.next_layout(self.__layout_group)
        dismissed = ''
        back = colorama.Back.WHITE
        if self.__dict[group][proj]['dismissed']:
            dismissed = '     (DISMISSED)'
            back = colorama.Back.RED


        tbl.add_row(['Project name', 'Members' + dismissed], back + colorama.Fore.BLACK + colorama.Style.BRIGHT)

        first_member = True
        for member in self.__dict[group][proj]['members']:
            if first_member:
                first_member = False
                tbl.add_row([proj,member], back + colorama.Fore.BLACK + colorama.Style.NORMAL)
            else:
                tbl.add_rrow([member], back + colorama.Fore.BLACK + colorama.Style.NORMAL)

        if more:
            tbl.next_layout(self.__layout_info)
            results = OrderedDict(sorted(self.__dict[group][proj]['results'].items()))
            jokers = OrderedDict(sorted(self.__dict[group][proj]['joker'].items()))

            tbl.add_rrow(list(results.keys()), back + colorama.Fore.BLACK + colorama.Style.BRIGHT)
            tbl.add_row(['results:'] + list(results.values()), back + colorama.Fore.BLACK + colorama.Style.NORMAL)
            tbl.add_row(['joker:'] + list(jokers.values()), back + colorama.Fore.BLACK + colorama.Style.NORMAL)
Code Example #27
class ActionGroupResult(object):
    def __init__(self, actiongroup):
        self.actiongroup = actiongroup
        self._actionresults = OrderedDict()

    def __getitem__(self, actiongroup_name):
        return self._actionresults[actiongroup_name]

    def __iter__(self):
        return iter(self._actionresults.values())

    @property
    def failed(self):
        for actionresult in self._actionresults.values():
            if actionresult.failed:
                return True
        return False

    def add_actionresult(self, actionresult):
        self._actionresults[actionresult.actionclass.get_name()] = actionresult

    def to_dict(self):
        resultlist = []
        for actionresult in self._actionresults.values():
            resultlist.append(actionresult.to_dict())
        return {"actiongroup_name": self.actiongroup.name, "results": resultlist}

    def __str__(self):
        return json.dumps(self.to_dict(), indent=4)
Code Example #28
File: crossbar.py  Project: aomtoku/liteeth
class LiteEthCrossbar(Module):
    def __init__(self, master_port, dispatch_param):
        self.users = OrderedDict()
        self.master = master_port(8)
        self.dispatch_param = dispatch_param

    # overload this in derived classes
    def get_port(self, *args, **kwargs):
        pass

    def do_finalize(self):
        # TX arbitrate
        sinks = [port.sink for port in self.users.values()]
        self.submodules.arbiter = Arbiter(sinks, self.master.source)

        # RX dispatch
        sources = [port.source for port in self.users.values()]
        self.submodules.dispatcher = Dispatcher(self.master.sink,
                                                sources,
                                                one_hot=True)
        cases = {}
        cases["default"] = self.dispatcher.sel.eq(0)
        for i, (k, v) in enumerate(self.users.items()):
            cases[k] = self.dispatcher.sel.eq(2**i)
        self.comb += \
            Case(getattr(self.master.sink, self.dispatch_param), cases)
Code Example #29
File: pystatus.py  Project: JohnBox/pystatus
def main():
    cfg = parser.parse('./pystatus.ini')
    VERSION = {'version': 1}
    print(dumps(VERSION))
    print('[')
    interval = float(cfg['PYSTATUS'].get('refresh', '1'))
    panel = OrderedDict()
    panel['time'] = time.Time(cfg['TIME'])
    panel['battery'] = battery.Battery(cfg['BATTERY'])
    panel['sound'] = sound.Sound(cfg['SOUND'])
    # panel['wifi'] = wifi.Wifi(cfg['WIFI'])
    panel['ethernet'] = ethernet.Ethernet(cfg['ETHERNET'])
    panel['cputemp'] = cputemp.CpuTemp(cfg['CPUTEMP'])
    panel['cpufan'] = cpufan.CpuFan(cfg['CPUFAN'])
    panel['ram'] = ram.RAM(cfg['RAM'])
    panel['vk'] = vk.VK(cfg['VK'])
    # panel['gmail'] = gmail.Gmail(cfg['GMAIL'])
    while True:
        start = t.time()
        visibled = list(filter(lambda i: i.visible, reversed(list(panel.values()))))
        print(visibled, end=',\n')
        list(map(lambda i: i.refresh(), panel.values()))
        late = t.time() - start

        sleep((interval - late) if (interval - late) > 0 else interval)
        stdout.flush()
Code Example #30
File: statplot.py  Project: EnricoGiampieri/dataplot
 def recursive_split(rect_key,rect_coords,category_idx,split_dir,gap):
     """
     given a key of the boxes and the data to analyze,
     split the key into several keys stratificated by the given
     category in the assigned direction
     """
     ticks = []
     category = categories[category_idx]
     chiave=rect_key
     divisione = OrderedDict()
     for tipo in category:
         divisione[tipo]=0.
         for k,v in counted.items():
             if k[len(rect_key)-1]!=tipo:
                 continue 
             if not all( k[k1]==v1 for k1,v1 in enumerate(rect_key[1:])):
                 continue
             divisione[tipo]+=v
     totali = 1.*sum(divisione.values())
     if totali: #check for empty categories
         divisione = OrderedDict( (k,v/totali) for k,v in divisione.items() )
     else:
         divisione = OrderedDict( (k,0.) for k,v in divisione.items() )
     prop = list(divisione.values())
     div_keys = list(divisione.keys())
     new_rects = split_rect(*rect_coords,proportion=prop,direction=split_dir,gap=gap)
     divisi = OrderedDict( (chiave+(k,),v) for k,v in zip(div_keys,new_rects))
     d = (split_dir == 'h')
     ticks = [ (k,O[d]+0.5*[h,w][d]) for k,(O,h,w) in zip(div_keys,new_rects) ]
     return divisi, list(zip(*ticks))
Code Example #31
File: attrs.py  Project: yxliang01/mypy
def _analyze_class(ctx: 'mypy.plugin.ClassDefContext', auto_attribs: bool,
                   kw_only: bool) -> List[Attribute]:
    """Analyze the class body of an attr maker, its parents, and return the Attributes found.

    auto_attribs=True means we'll generate attributes from type annotations also.
    kw_only=True means that all attributes created here will be keyword only args in __init__.
    """
    own_attrs = OrderedDict()  # type: OrderedDict[str, Attribute]
    # Walk the body looking for assignments and decorators.
    for stmt in ctx.cls.defs.body:
        if isinstance(stmt, AssignmentStmt):
            for attr in _attributes_from_assignment(ctx, stmt, auto_attribs,
                                                    kw_only):
                # When attrs are defined twice in the same body we want to use the 2nd definition
                # in the 2nd location. So remove it from the OrderedDict.
                # Unless it's auto_attribs in which case we want the 2nd definition in the
                # 1st location.
                if not auto_attribs and attr.name in own_attrs:
                    del own_attrs[attr.name]
                own_attrs[attr.name] = attr
        elif isinstance(stmt, Decorator):
            _cleanup_decorator(stmt, own_attrs)

    for attribute in own_attrs.values():
        # Even though these look like class level assignments we want them to look like
        # instance level assignments.
        if attribute.name in ctx.cls.info.names:
            node = ctx.cls.info.names[attribute.name].node
            if isinstance(node, PlaceholderNode):
                # This node is not ready yet.
                continue
            assert isinstance(node, Var)
            node.is_initialized_in_class = False

    # Traverse the MRO and collect attributes from the parents.
    taken_attr_names = set(own_attrs)
    super_attrs = []
    for super_info in ctx.cls.info.mro[1:-1]:
        if 'attrs' in super_info.metadata:
            # Each class depends on the set of attributes in its attrs ancestors.
            ctx.api.add_plugin_dependency(
                make_wildcard_trigger(super_info.fullname()))

            for data in super_info.metadata['attrs']['attributes']:
                # Only add an attribute if it hasn't been defined before.  This
                # allows for overwriting attribute definitions by subclassing.
                if data['name'] not in taken_attr_names:
                    a = Attribute.deserialize(super_info, data)
                    super_attrs.append(a)
                    taken_attr_names.add(a.name)
    attributes = super_attrs + list(own_attrs.values())

    # Check the init args for correct default-ness.  Note: This has to be done after all the
    # attributes for all classes have been read, because subclasses can override parents.
    last_default = False
    last_kw_only = False

    for attribute in attributes:
        if not attribute.init:
            continue

        if attribute.kw_only:
            # Keyword-only attributes don't care whether they are default or not.
            last_kw_only = True
            continue

        if not attribute.has_default and last_default:
            ctx.api.fail(
                "Non-default attributes not allowed after default attributes.",
                attribute.context)
        if last_kw_only:
            ctx.api.fail(
                "Non keyword-only attributes are not allowed after a keyword-only attribute.",
                attribute.context)
        last_default |= attribute.has_default

    return attributes
Code Example #32
            # 	body radius (SI units, i.e. meters)
            _b.R * 1000.,

            # safe_radius
            # 	body distance safe for a spacecraft fly-by
            ((2. * _b.R) if _b.body == 'jupiter' else (_b.R + 50.)) * 1000.,
            # "The spacecraft range to Jupiter cannot go below 2*R_J at any time"
            # "The flyby altitudes at the satellites (i.e. the range to the satellite centre at closest approach on the flyby minus the satellite radius) cannot be below 50 km"

            # name
            # 	body name
            _b.body,
        )) for _b in body_tuple.values()
])

(jupiter, io, europa, ganymede, callisto) = body_obj.values()

# http://en.wikipedia.org/wiki/Orbital_elements
body_label = {
    #	'Object UID'    : ,

    # a specified point in time
    #	'Epoch (MJD)'   : ,

    # measure of the radius of an orbit taken from the points of that same orbit's two most distant points
    # http://en.wikipedia.org/wiki/Semimajor_axis
    'a (km)': r'$a$: semi major axis (km)',

    # The orbital eccentricity of an astronomical body is the amount by which its orbit deviates from a perfect circle, where 0 is perfectly circular, and 1.0 is a parabola, and no longer a closed orbit.
    # http://en.wikipedia.org/wiki/Orbital_eccentricity
    'e': r'$e$: eccentricity',
Code Example #33
    def get_rack_units(self,
                       user=None,
                       face=DeviceFaceChoices.FACE_FRONT,
                       exclude=None,
                       expand_devices=True):
        """
        Return a list of rack units as dictionaries. Example: {'device': None, 'face': 0, 'id': 48, 'name': 'U48'}
        Each key 'device' is either a Device or None. By default, multi-U devices are repeated for each U they occupy.

        :param face: Rack face (front or rear)
        :param user: User instance to be used for evaluating device view permissions. If None, all devices
            will be included.
        :param exclude: PK of a Device to exclude (optional); helpful when relocating a Device within a Rack
        :param expand_devices: When True, all units that a device occupies will be listed with each containing a
            reference to the device. When False, only the bottom most unit for a device is included and that unit
            contains a height attribute for the device
        """

        elevation = OrderedDict()
        for u in self.units:
            elevation[u] = {
                'id': u,
                'name': f'U{u}',
                'face': face,
                'device': None,
                'occupied': False
            }

        # Add devices to rack units list
        if self.pk:

            # Retrieve all devices installed within the rack
            queryset = Device.objects.prefetch_related(
                'device_type', 'device_type__manufacturer',
                'device_role').annotate(
                    devicebay_count=Count('devicebays')).exclude(
                        pk=exclude).filter(
                            rack=self,
                            position__gt=0,
                            device_type__u_height__gt=0).filter(
                                Q(face=face)
                                | Q(device_type__is_full_depth=True))

            # Determine which devices the user has permission to view
            permitted_device_ids = []
            if user is not None:
                permitted_device_ids = self.devices.restrict(
                    user, 'view').values_list('pk', flat=True)

            for device in queryset:
                if expand_devices:
                    for u in range(
                            device.position,
                            device.position + device.device_type.u_height):
                        if user is None or device.pk in permitted_device_ids:
                            elevation[u]['device'] = device
                        elevation[u]['occupied'] = True
                else:
                    if user is None or device.pk in permitted_device_ids:
                        elevation[device.position]['device'] = device
                    elevation[device.position]['occupied'] = True
                    elevation[device.
                              position]['height'] = device.device_type.u_height
                    for u in range(
                            device.position + 1,
                            device.position + device.device_type.u_height):
                        elevation.pop(u, None)

        return [u for u in elevation.values()]
Code Example #34
File: two_layer_net.py  Project: zeroam/TIL
class TwoLayerNet:
    def __init__(self,
                 input_size,
                 hidden_size,
                 output_size,
                 weight_init_std=0.01):
        # Initialize weights
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(
            input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(
            hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

        # Create layers
        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])

        self.lastLayer = SoftmaxWithLoss()

    def predict(self, x):
        for layer in self.layers.values():
            x = layer.forward(x)

        return x

    # x: input data, t: correct labels
    def loss(self, x, t):
        y = self.predict(x)
        return self.lastLayer.forward(y, t)

    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        if t.ndim != 1:
            t = np.argmax(t, axis=1)

        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    # x: input data, t: correct labels
    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])

        return grads

    def gradient(self, x, t):
        # forward
        self.loss(x, t)

        # backward
        dout = 1
        dout = self.lastLayer.backward(dout)

        layers = list(self.layers.values())
        layers.reverse()
        for layer in layers:
            dout = layer.backward(dout)

        # Store results
        grads = {}
        grads['W1'], grads['b1'] = self.layers['Affine1'].dW, self.layers[
            'Affine1'].db
        grads['W2'], grads['b2'] = self.layers['Affine2'].dW, self.layers[
            'Affine2'].db

        return grads
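
A training-step sketch (Affine, Relu, SoftmaxWithLoss, and numerical_gradient are assumed importable from the same project; shapes are illustrative):

import numpy as np

net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
x = np.random.rand(100, 784)                   # dummy batch of 100 inputs
t = np.eye(10)[np.random.randint(0, 10, 100)]  # one-hot dummy labels
grads = net.gradient(x, t)                     # backprop gradients
for key in ('W1', 'b1', 'W2', 'b2'):
    net.params[key] -= 0.1 * grads[key]        # one SGD step
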
Code Example #35
File: inspector.py  Project: alberto-sanchez/RDst
# merge charge conjugates and FSR
sorted_all_decays_merged = OrderedDict()
already_done = OrderedDict()

for k in sorted_all_decays.keys():
    reference = map(int, re.findall('[0-9]+', k))
    gammaless_reference = list(filter(lambda x: x != 22,
                                      reference))  # don't count FSR photons
    if repr(gammaless_reference) in already_done:
        if len(k.replace('-', '')) > len(
                already_done[repr(gammaless_reference)].replace('-', '')):
            # print (k, 'longer than', already_done[repr(gammaless_reference)])
            continue
    already_done[repr(gammaless_reference)] = k

for v in already_done.values():
    sorted_all_decays_merged[v] = 0
    reference = map(int, re.findall('[0-9]+', v))
    gammaless_reference = list(filter(lambda x: x != 22,
                                      reference))  # don't count FSR photons
    for kk, vv in sorted_all_decays.items():
        undertest = map(int, re.findall('[0-9]+', kk))
        undertest = list(filter(lambda x: x != 22,
                                undertest))  # don't count FSR photons
        if undertest == gammaless_reference:
            sorted_all_decays_merged[v] += vv

sorted_all_decays_merged = OrderedDict(
    sorted(sorted_all_decays_merged.items(), key=lambda x: x[1], reverse=True))

with open('decay_no_acceptance_fullstat_test.pkl', 'wb') as fout:
Code Example #36
class Page(object):
    """
    A composite object to present multiple charts vertically in a single page
    """
    def __init__(self, page_title="EChart", **name_chart_pair):
        """
        Create a page instance.
        :param page_title: The title of generated html file.
        :param name_chart_pair: named charts as {<name>:<chart>}
        """
        self.page_title = page_title
        self._charts = OrderedDict()
        for k, v in name_chart_pair.items():
            self.add_chart(chart=v, name=k)

    def add(self, achart_or_charts):
        if not isinstance(achart_or_charts, (list, tuple, set)):
            achart_or_charts = (achart_or_charts, )  # Make it a sequence
        for c in achart_or_charts:
            self.add_chart(chart=c)
        return self

    def add_chart(self, chart, name=None):
        """
        Add a chart. New in v0.5.4
        :param chart:
        :param name:
        :return:
        """
        name = name or self._next_name()
        self._charts[name] = chart
        return self

    def _next_name(self):
        return "c{}".format(len(self))

    # List-Like Feature

    def __iter__(self):
        for chart in self._charts.values():
            yield chart

    def __len__(self):
        return len(self._charts)

    # Dict-Like Feature

    def __contains__(self, item):
        return item in self._charts

    def __getitem__(self, item):
        if isinstance(item, int):
            # list index
            return list(self._charts.values())[item]

        return self._charts[item]

    def __setitem__(self, key, value):
        self._charts[key] = value

    # Chart-Like Feature

    def render(self,
               path="render.html",
               template_name="simple_page.html",
               object_name="page",
               **kwargs):
        _, ext = os.path.splitext(path)
        _file_type = ext[1:]
        if _file_type != constants.DEFAULT_HTML:
            raise NotImplementedError(
                "Rendering Page instance as image is not supported!")

        env = engine.create_default_environment(constants.DEFAULT_HTML)
        env.render_chart_to_file(chart=self,
                                 object_name=object_name,
                                 path=path,
                                 template_name=template_name,
                                 **kwargs)

    def render_embed(self):
        """
        Produce rendered charts in html for embedding purpose
        """
        return Markup("<br/> ".join([chart.render_embed() for chart in self]))

    def get_js_dependencies(self):
        """
        Declare its javascript dependencies for embedding purpose
        """
        return CURRENT_CONFIG.produce_html_script_list(self.js_dependencies)

    def _repr_html_(self):
        """
        :return: html content for jupyter
        """
        dependencies = self.js_dependencies
        require_config = CURRENT_CONFIG.produce_require_configuration(
            dependencies)
        config_items = require_config["config_items"]
        libraries = require_config["libraries"]
        env = engine.create_default_environment(constants.DEFAULT_HTML)
        return env.render_chart_to_notebook(charts=self,
                                            config_items=config_items,
                                            libraries=libraries)

    @property
    def js_dependencies(self):
        # Treat self as a list, not a page
        return utils.merge_js_dependencies(*self)

    @classmethod
    def from_charts(cls, *charts):
        """
        A shortcut class method for building page object from charts.

        :param args: page arguments
        :return: Page instance
        """
        page = cls()
        for chart in charts:
            page.add_chart(chart)
        return page
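
A usage sketch (Bar and Line stand in for pyecharts chart objects built elsewhere):

# bar = Bar(...); line = Line(...)   # charts built elsewhere
page = Page.from_charts(bar, line)
page.render(path="report.html")      # non-.html targets raise NotImplementedError
html = page.render_embed()           # or embed the charts in an existing template
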
Code Example #37
File: __init__.py  Project: keksnicoh/gpupy
class DomainGraph(components.widgets.Widget):
    """ 
    abstract class for graphs which are using
    the domain concept for plotting data. 

    """

    DEFAULT_DOMAIN_NAME = 'domain'

    resolution = attributes.VectorAttribute(2, (1, 1))
    viewport = attributes.VectorAttribute(2, (1, 1))

    def __init__(self, domain=None):
        """
        initializes the graph with one or many domains. 
        argument domain is an dict, it is interpreted as 
        (key, domain) pairs.
        """
        super().__init__()
        self.domains = OrderedDict()

        if isinstance(domain, dict):
            for k, v in domain.items():
                self[k] = v
        elif domain is not None:
            self[DomainGraph.DEFAULT_DOMAIN_NAME] = domain 


    def __setitem__(self, key, domain):
        """
        adds a domain to the graph
        """
        safe_name(key)
        domain.requires(list(self.domains.keys()))
        self.domains[key] = _DomainInfo(domain, 'd_{}'.format(key))


    def __getitem__(self, key):
        """
        returns a domain by key
        """
        return self.domains[key].domain


    def get_domain_glsl_substitutions(self):
        """
        returns a list of tuples 
          (glsl_substitution, glsl_identifier)

        where glsl_substitution is the name of the
        substitution e.g. ${name}
        """
        domain_sfnames = {}
        for dname, domain in self.domains.items():
            for field, glsl_id, glsl_meta, glsl_type in domain.glsl_identifier:
                substkey = 'D.'+dname
                if field is not None:
                    substkey += '.{}'.format(field)
                domain_sfnames.update({substkey: glsl_id})
        return domain_sfnames


    def _enable_domain_attrib_pointers(self):
        """
        enable all vertex attribute pointers
        from domains
        """
        for domain_info in self.domains.values():
            domain = domain_info.domain
            if hasattr(domain, 'attrib_pointers'):
                domain.attrib_pointers(domain_info.prefix, self.program.attributes)
Code Example #38
class EventConsolidator:
    @property
    def events(self):
        if self._ignore:
            self._pool = OrderedDict([(k, v) for k, v in self._pool.items()
                                      if not is_ignored(k)])
            self._final = OrderedDict([(k, v) for k, v in self._final.items()
                                       if not is_ignored(k)])
            self._initial = OrderedDict([(k, v)
                                         for k, v in self._initial.items()
                                         if not is_ignored(k)])

        i_pool = {v: k for k, v in self._pool.items()}
        i_final = {v: k for k, v in self._final.items()}
        i_initial = {v: k for k, v in self._initial.items()}

        moved = set()
        created = set(self._final.keys()) - set(self._initial.keys())
        deleted = set(self._initial.keys()) - set(self._final.keys())
        modified = set(i for i in self._pool.values()
                       if i.modified and not i.is_folder)

        # Probably don't need both loops here but better safe than sorry.
        # If an item has been created and deleted it is actually a move.
        for key in set(created):
            src = i_initial.get(self._final[key])
            if src:
                created.remove(key)
                moved.add((src, key))

        for key in set(deleted):
            dest = i_final.get(self._initial[key])
            if dest:
                deleted.remove(key)
                moved.add((key, dest))

        # Sort by place in the file hierarchy
        # Children come before parents
        # NOTE: Create events must be sorted in THE OPPOSITE direction
        sorter = lambda x: x.count(os.path.sep)

        # Windows reports folder deletes as file deletes + modifies
        # If a child exists for any file assume it is a directory
        for delete in deleted:
            for other in deleted:
                if delete != other and other.startswith(delete):
                    self._initial[delete].is_folder = True
                    break

        evts = list(
            itertools.chain(
                (events.DirMovedEvent(src, dest) if self._final[dest].is_folder
                 else events.FileMovedEvent(src, dest)
                 for src, dest in sorted(moved,
                                         key=lambda x: x[0].count(os.path.sep),
                                         reverse=True)),
                (events.DirDeletedEvent(x)
                 if self._initial[x].is_folder else events.FileDeletedEvent(x)
                 for x in sorted(deleted, key=sorter, reverse=True)),
                (events.DirCreatedEvent(x)
                 if self._final[x].is_folder else events.FileCreatedEvent(x)
                 for x in sorted(created, key=sorter)),
                (events.FileModifiedEvent(i_pool[x]) for x in modified
                 if x in i_final and not i_pool[x] in created),
            ))

        mapped = set([(getattr(event, 'dest_path',
                               event.src_path), event.event_type)
                      for event in evts if event.is_directory
                      and not event.event_type == EVENT_TYPE_CREATED])

        # Do our best to dedup all found events.
        # If there is a matching event type at a parent path disregard the children
        def check(event):
            segments = getattr(event, 'dest_path',
                               event.src_path).split(os.path.sep)
            for i in range(len(segments) - 1):
                if (os.path.sep.join(segments[:i + 1]),
                        event.event_type) in mapped:
                    return False
            return True

        return list(filter(check, evts))

    def __init__(self, ignore=True):
        self._events = []
        self._ignore = ignore
        self._pool = OrderedDict()
        self._final = OrderedDict()
        self._initial = OrderedDict()
        self._hash_pool = OrderedDict()

    def clear(self):
        self._events = []
        self._pool.clear()
        self._final.clear()
        self._initial.clear()
        self._hash_pool.clear()

    def push(self, event):
        self._events.append(event)

        self._push(event.src_path, event)
        if event.event_type == EVENT_TYPE_MOVED:
            self._push(event.dest_path, event, self._pool[event.src_path])

    def _push(self, path, event, item=None):
        copy_found = False
        # For the case where Windows decides that moves should actually be creates and deletes.
        # If a delete or create event with a hash is seen, check the hash pool for a matching hash.
        # If found, ensure the found event is the opposite of our current event and set item to it.
        if event.event_type in (events.EVENT_TYPE_CREATED,
                                events.EVENT_TYPE_DELETED) and event.sha256:
            item = self._hash_pool.pop(event.sha256, None)
            if item and {event.event_type, item.events[0].event_type} != {
                    events.EVENT_TYPE_CREATED, events.EVENT_TYPE_DELETED
            }:
                item = None
            elif item:
                copy_found = True
                item.modified = False
                self._pool[path] = item

        item = self._pool.setdefault(path, item or Item(event.is_directory))

        if sys.platform == 'win32' and event.event_type == EVENT_TYPE_MODIFIED and item.events and item.events[
                -1].event_type in (EVENT_TYPE_MOVED, EVENT_TYPE_CREATED):
            return  # Windows really likes emitting modified events. If a modified is prefaced by a MOVE or CREATE it should/can be ignored

        if event.sha256 and not copy_found:
            # If this is an unmatched event with a hash add it to the pool
            self._hash_pool.setdefault(event.sha256, item)

        item.events.append(event)

        if event.event_type == EVENT_TYPE_MODIFIED:
            item.modified = True

        if event.event_type != EVENT_TYPE_DELETED and (
                event.event_type != EVENT_TYPE_MOVED
                or path == event.dest_path):
            # If this event would result in the item in question existing in the final virtual state, add it.
            self._final[path] = item
        else:
            # Otherwise ensure that the item and its children, if any, are not in the final virtual state.
            self._final.pop(path, None)
            for key in list(self._final.keys()):
                if key.startswith(path):
                    self._final.pop(key)

        if event.event_type != EVENT_TYPE_CREATED and (
                event.event_type != EVENT_TYPE_MOVED
                or path == event.src_path):
            if (item.events[0].event_type == EVENT_TYPE_CREATED
                    and not copy_found) or (
                        item.events[0].event_type == EVENT_TYPE_MOVED
                        and item.events[0].dest_path == path):
                return  # If this file was created by an event, don't create an initial placeholder for it.
            # If this event indicates the item in question would have existed in the initial virtual state, add it.
            self._initial[path] = item
        else:
            # Otherwise ensure that the item and its children, if any, are not in the initial virtual state.
            self._initial.pop(path, None)
            for key in list(self._initial.keys()):
                if key.startswith(path):
                    self._initial.pop(key)
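
A standalone sketch of the move-detection step above: when the same item object shows up under a new key in the final state while its old key is gone from the initial state, the create/delete pair becomes a move (plain strings stand in for the real Item objects):

# Standalone sketch of reinterpreting a create/delete pair as a move.
initial = {'/a/old.txt': 'item1', '/a/keep.txt': 'item2'}
final = {'/a/new.txt': 'item1', '/a/keep.txt': 'item2'}

i_initial = {v: k for k, v in initial.items()}  # inverse: item -> old path

created = set(final) - set(initial)
deleted = set(initial) - set(final)
moved = set()

for key in set(created):
    src = i_initial.get(final[key])  # was this item known under another path?
    if src:
        created.remove(key)
        deleted.discard(src)
        moved.add((src, key))

print(moved)    # {('/a/old.txt', '/a/new.txt')}
print(created)  # set()
print(deleted)  # set()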
Code example #39
0
def process_combined_yamls(combined_yamls, split_compiler_factory):
    # get the splits info
    id_to_split_names, split_to_ids = get_id_to_split_names(
        combined_yamls[RootKeys.keys.splits])

    # initialize the data for split compiler objects using
    # the split information
    split_name_to_compiler = OrderedDict()
    for split_name in split_to_ids:
        split_name_to_compiler[split_name] =\
            split_compiler_factory(split_name=split_name,
                                    ids_in_split=split_to_ids[split_name])

    # define the action that will get applied to new labels
    def labels_action(output_mode, the_id, labels):
        split_names = id_to_split_names[the_id]
        for split_name in split_names:
            split_name_to_compiler[split_name].add_labels(
                the_id=the_id, output_mode=output_mode, labels=labels)

    def set_label_names_action(output_mode, label_names):
        for split_name in split_to_ids:
            split_name_to_compiler[split_name].set_label_names(
                output_mode=output_mode, label_names=label_names)

    process_labels_with_labels_action(
        labels_objects=combined_yamls[RootKeys.keys.labels],
        labels_action=labels_action,
        set_label_names_action=set_label_names_action)

    # define the action that will get applied to new features
    def features_action(input_mode, the_id, features):
        split_names = id_to_split_names[the_id]
        for split_name in split_names:
            split_name_to_compiler[split_name].add_features(
                the_id=the_id, input_mode=input_mode, features=features)

    process_features_with_features_action(
        features_objects=combined_yamls[RootKeys.keys.features],
        features_action=features_action)

    # define the action that will be applied to new weights
    def weights_action(output_mode, the_id, weights):
        split_names = id_to_split_names[the_id]
        for split_name in split_names:
            split_name_to_compiler[split_name].add_weights(
                the_id=the_id, weights=weights, output_mode=output_mode)

    def set_weight_names_action(output_mode, weight_names):
        for split_name in split_to_ids:
            split_name_to_compiler[split_name].set_weight_names(
                output_mode=output_mode, weight_names=weight_names)

    process_weights_with_weights_action(
        weights_objects=combined_yamls[RootKeys.keys.weights],
        weights_action=weights_action,
        set_weight_names_action=set_weight_names_action)

    for compiler in split_name_to_compiler.values():
        compiler.finalize()

    return split_name_to_compiler
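
A hypothetical sketch of a split_compiler_factory compatible with the function above; the method names mirror exactly the calls process_combined_yamls makes, everything else is an assumption:

# Hypothetical sketch: any callable taking (split_name, ids_in_split) and
# returning an object with these methods works as a split_compiler_factory.
class MinimalSplitCompiler:
    def __init__(self, split_name, ids_in_split):
        self.split_name = split_name
        self.ids_in_split = ids_in_split
        self.data = {}

    def add_labels(self, the_id, output_mode, labels):
        self.data.setdefault('labels', {})[(output_mode, the_id)] = labels

    def set_label_names(self, output_mode, label_names):
        self.data.setdefault('label_names', {})[output_mode] = label_names

    def add_features(self, the_id, input_mode, features):
        self.data.setdefault('features', {})[(input_mode, the_id)] = features

    def add_weights(self, the_id, weights, output_mode):
        self.data.setdefault('weights', {})[(output_mode, the_id)] = weights

    def set_weight_names(self, output_mode, weight_names):
        self.data.setdefault('weight_names', {})[output_mode] = weight_names

    def finalize(self):
        pass  # e.g. write the compiled split to disk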
Code example #40
0
File: market.py Project: iForeteller/paper_trading
class ChinaAMarket(Exchange):
    """中国A股交易市场"""

    def __init__(self, event_engine, mode):
        super(ChinaAMarket, self).__init__(event_engine, mode)

        self.market_name = "china_a_market"            # 交易市场名称
        self._active = False
        self.hq_client = TushareService()              # 行情源
        self.db = None                                 # 数据库实例
        self.exchange_symbols = ["SH", "SZ"]           # 交易市场标识
        self.match_mode = mode                         # 模拟交易引擎类型
        self.turnover_mode = TradeType.T_PLUS1.value   # 回转交易模式
        self.verification = OrderedDict()              # 验证清单
        # 真实环境下使用订单薄
        self.orders_book = OrderedDict()
        # 模拟环境下使用订单队列
        self.orders_queue = Queue()

    def on_init(self):
        """初始化"""
        # 开启交易撮合开关
        self._active = True

        # 绑定交易市场名称,用于订单薄接收订单
        SETTINGS["MARKET_NAME"] = self.market_name

        # 注册验证程序
        if self.match_mode == EngineMode.REALTIME.value:
            self.verification = {
                "1": self.product_verification,
                "2": self.price_verification,
            }
            return self.on_orders_arrived_realtime

        else:
            self.verification = {
                "1": self.product_verification,
            }
            return self.on_orders_arrived_simulation

    def on_match(self, db):
        """交易撮合"""
        self.db = db
        self.write_log("{}:交易市场已开启".format(self.market_name))

        try:
            if self.match_mode == EngineMode.REALTIME.value:
                self.on_realtime_match()
            else:
                self.on_simulation_match()

        except Exception as e:
            event = Event(EVENT_ERROR, traceback.format_exc())
            self.event_engine.put(event)

    def on_realtime_match(self):
        """实时交易撮合"""
        self.write_log("{}:真实行情".format(self.market_name))

        # 行情连接
        self.hq_client.connect_api()

        # 加载当日未成交的订单
        self.load_orders_today_notrade()

        while self._active:
            # 交易时间检验
            if not self.time_verification():
                continue

            if not self.orders_book:
                continue

            for order in self.orders_book.values():
                # 订单撮合
                self.on_orders_match(order)
            sleep(3)

    def on_simulation_match(self):
        """模拟交易撮合"""
        self.write_log("{}:模拟行情".format(self.market_name))

        while self._active:
            if self.orders_queue.empty():
                continue

            order = self.orders_queue.get(block=True)

            # 模拟清算
            if order.order_type == OrderType.LIQ.value:
                on_liquidation(
                    self.db,
                    order.account_id,
                    order.order_date,
                    {order.pt_symbol: order.order_price}
                )
                continue

            # 订单成交
            self.on_orders_deal(order)

    def on_orders_arrived_realtime(self, order):
        """订单到达-真实行情"""
        order_id = order.order_id

        # 取消订单的处理
        if order.order_type == OrderType.CANCEL.value:
            if self.orders_book.get(order_id):
                del self.orders_book[order_id]
                on_order_cancel(order.account_id, order_id, self.db)
                return True
            else:
                return False
        # 过滤掉清算订单
        elif order.order_type == OrderType.LIQ.value:
            return False
        else:
            # 订单验证
            if not self.on_back_verification(order):
                return False
            else:
                # 将订单添加到订单薄
                self.orders_book[order_id] = order
                return True

    def on_orders_arrived_simulation(self, order):
        """订单到达-模拟行情"""
        # 过滤掉取消订单
        if order.order_type == OrderType.CANCEL.value:
            return False

        # 订单验证
        if not self.on_back_verification(order):
            return False

        # 订单推送到订单撮合引擎
        self.orders_queue.put(order)

    def on_orders_match(self, order: Order):
        """订单撮合"""
        hq = self.hq_client.get_realtime_data(order.pt_symbol)

        if hq is not None:
            now_price = float(hq.loc[0, "price"])
            if order.price_type == PriceType.MARKET.value:
                order.order_price = now_price
                # 订单成交
                self.on_orders_deal(order)
                return

            elif order.price_type == PriceType.LIMIT.value:
                if order.order_type == OrderType.BUY.value:
                    if order.order_price >= now_price:
                        if order.status == Status.SUBMITTING.value:
                            order.trade_price = now_price
                        # 订单成交
                        self.on_orders_deal(order)
                        return
                else:
                    if order.order_price <= now_price:
                        if order.status == Status.SUBMITTING.value:
                            order.trade_price = now_price
                        # 订单成交
                        self.on_orders_deal(order)
                        return

            # 没有成交更新订单状态
            self.on_orders_status_modify(order)

    def on_orders_deal(self, order: Order):
        """订单成交"""
        if not order.trade_price:
            order.trade_price = order.order_price
        order.traded = order.volume
        order.trade_type = self.turnover_mode

        on_order_deal(order, self.db)

        self.write_log(
            "处理订单:账户:{}, 订单号:{}, 结果:{}".format(
                order.account_id,
                order.order_id,
                "全部成交"))

    def on_orders_book_rejected_all(self):
        """拒绝所有订单"""
        if self.orders_book:
            for order in self.orders_book.values():
                order.status = Status.REJECTED.value
                order.error_msg = "交易关闭,自动拒单"

                on_order_refuse(order, self.db)

                self.write_log(
                    "处理订单:账户:{}, 订单号:{}, 结果:{}".format(
                        order.account_id,
                        order.order_id,
                        order.error_msg))

    def on_orders_status_modify(self, order):
        """更新订单状态"""
        raw_data = {}
        raw_data['flt'] = {'order_id': order.order_id}
        raw_data["set"] = {'$set': {'status': Status.NOTTRADED.value}}
        db_data = DBData(
            db_name=SETTINGS['ORDERS_BOOK'],
            db_cl=self.market_name,
            raw_data=raw_data
        )

        return self.db.on_update(db_data)

    def load_orders_today_notrade(self):
        """查询当日未成交的订单"""
        account_list = query_account_list(self.db)

        for account_id in account_list:
            orders = query_orders_today(account_id, self.db)
            if isinstance(orders, list):
                for order in orders:
                    if order['status'] in [Status.NOTTRADED.value, Status.PARTTRADED.value]:
                        order = order_generate(order)
                        self.orders_book[order.order_id] = order

    def on_back_verification(self, order: Order):
        """后端验证"""
        for k, verification in self.verification.items():
            result, msg = verification(order)

            if not result:
                order.status = Status.REJECTED.value
                order.error_msg = msg
                on_order_refuse(order, self.db)

                self.write_log(
                    "处理订单:账户:{}, 订单号:{}, 结果:{}".format(
                        order.account_id, order.order_id, msg))

                return False

        return True

    def time_verification(self):
        """交易时间验证"""
        result = True
        now = datetime.now().time()
        time_dict = {
            "1": (time(9, 15), time(11, 30)),
            "2": (time(13, 0), time(15, 0))
        }
        for k, time_check in time_dict.items():
            if not (now >= time_check[0] and now <= time_check[1]):
                result = False

        if now >= time(15, 1):
            # 市场关闭
            self.on_close()
            result = False

        return result

    def product_verification(self, order: Order):
        """交易产品验证"""
        if order.exchange in self.exchange_symbols:
            return True, ""
        else:
            return False, "交易品种不符"

    def price_verification(self, order: Order):
        """价格验证"""
        return True, ""

    def on_close(self):
        """模拟交易市场关闭"""
        # 阻止接收新订单
        SETTINGS["MARKET_NAME"] = ""

        # 关闭市场撮合
        self._active = False

        # 模拟交易结束,拒绝所有未成交的订单
        self.on_orders_book_rejected_all()

        # 清算
        self.liquidation()

        # 关闭行情接口
        self.hq_client.close()

        # 推送关闭事件
        event = Event(EVENT_MARKET_CLOSE, self.market_name)
        self.event_engine.put(event)

    def liquidation(self):
        """获取收盘数据"""
        tokens = query_account_list(self.db)
        today = datetime.now().strftime("%Y%m%d")

        if tokens:
            for token in tokens:
                pos_list = query_position(token, self.db)
                if isinstance(pos_list, list):
                    for pos in pos_list:
                        hq = self.hq_client.get_realtime_data(pos["pt_symbol"])
                        if hq is not None:
                            now_price = float(hq.loc[0, "price"])
                            # 更新收盘行情
                            on_position_update_price(token, pos, now_price, self.db)
                # 清算
                on_liquidation(self.db, token, today)
        self.write_log("{}: 账户与持仓清算完成".format(self.market_name))

    def write_log(self, msg: str, level: int = INFO):
        """"""
        log = LogData(
            log_content=msg,
            log_level=level
        )
        event = Event(EVENT_LOG, log)
        self.event_engine.put(event)
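
A standalone sketch of the verification-chain pattern used by on_back_verification above: validators run in insertion order, each returns (ok, message), and the first failure rejects the order. The dict-based orders here are stand-ins for the real Order class:

# Standalone sketch of the ordered verification chain.
def check_product(order):
    return order.get("exchange") in ("SH", "SZ"), "unsupported trading product"

def check_price(order):
    return order.get("price", 0) > 0, "invalid price"

verification = {"1": check_product, "2": check_price}

def verify(order):
    for _, check in verification.items():
        ok, msg = check(order)
        if not ok:
            return False, msg
    return True, ""

print(verify({"exchange": "SH", "price": 10.5}))  # (True, '')
print(verify({"exchange": "HK", "price": 10.5}))  # (False, 'unsupported trading product')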
Code example #41
0
def autotune(operator, args, level, mode):
    """
    Operator autotuning.

    Parameters
    ----------
    operator : Operator
        Input Operator.
    args : dict_like
        The runtime arguments with which `operator` is run.
    level : str
        The autotuning aggressiveness (basic, aggressive, max). A more
        aggressive autotuning might eventually result in higher runtime
        performance, but the autotuning phase will take longer.
    mode : str
        The autotuning mode (preemptive, runtime). In preemptive mode, the
        output runtime values supplied by the user to `operator.apply` are
        replaced with shadow copies.
    """
    key = [level, mode]
    accepted = configuration._accepted['autotuning']
    if key not in accepted:
        raise ValueError("The accepted `(level, mode)` combinations are `%s`; "
                         "provided `%s` instead" % (accepted, key))

    # We get passed all the arguments, but the cfunction only requires a subset
    at_args = OrderedDict([(p.name, args[p.name])
                           for p in operator.parameters])

    # User-provided output data won't be altered in `preemptive` mode
    if mode == 'preemptive':
        output = {i.name: i for i in operator.output}
        copies = {
            k: output[k]._C_as_ndarray(v).copy()
            for k, v in args.items() if k in output
        }
        # WARNING: `copies` keeps references to the numpy arrays; this is
        # required to prevent garbage collection from kicking in during
        # autotuning and prematurely freeing the shadow copies handed over to C-land
        at_args.update(
            {k: output[k]._C_make_dataobj(v)
             for k, v in copies.items()})

    # Disable halo exchanges through MPI_PROC_NULL
    if mode in ['preemptive', 'destructive']:
        for p in operator.parameters:
            if isinstance(p, MPINeighborhood):
                at_args.update(MPINeighborhood(p.fields)._arg_values())
                for i in p.fields:
                    setattr(at_args[p.name]._obj, i, MPI.PROC_NULL)
            elif isinstance(p, MPIMsgEnriched):
                at_args.update(
                    MPIMsgEnriched(p.name, p.function, p.halos)._arg_values())
                for i in at_args[p.name]:
                    i.fromrank = MPI.PROC_NULL
                    i.torank = MPI.PROC_NULL

    roots = [operator.body] + [i.root for i in operator._func_table.values()]
    trees = filter_ordered(retrieve_iteration_tree(roots),
                           key=lambda i: i.root)

    # Detect the time-stepping Iteration; shrink its iteration range so that
    # each autotuning run only takes a few iterations
    steppers = {i for i in flatten(trees) if i.dim.is_Time}
    if len(steppers) == 0:
        stepper = None
        timesteps = 1
    elif len(steppers) == 1:
        stepper = steppers.pop()
        timesteps = init_time_bounds(stepper, at_args)
        if timesteps is None:
            return args, {}
    else:
        warning(
            "cannot perform autotuning unless there is one time loop; skipping"
        )
        return args, {}

    # Perform autotuning
    timings = {}
    for n, tree in enumerate(trees):
        blockable = [i.dim for i in tree if isinstance(i.dim, BlockDimension)]

        # Tunable arguments
        try:
            tunable = []
            tunable.append(generate_block_shapes(blockable, args, level))
            tunable.append(generate_nthreads(operator.nthreads, args, level))
            tunable = list(product(*tunable))
        except ValueError:
            # Some arguments are compulsory; if they are missing, autotuning is skipped
            continue

        # Symbolic number of loop-blocking blocks per thread
        nblocks_per_thread = calculate_nblocks(tree,
                                               blockable) / operator.nthreads

        for bs, nt in tunable:
            # Can we safely autotune over the given time range?
            if not check_time_bounds(stepper, at_args, args, mode):
                break

            # Update `at_args` to use the new tunable arguments
            run = [(k, v) for k, v in bs + nt if k in at_args]
            at_args.update(dict(run))

            # Drop run if not at least one block per thread
            if not configuration['develop-mode'] and nblocks_per_thread.subs(
                    at_args) < 1:
                continue

            # Make sure we remain within stack bounds, otherwise skip run
            try:
                stack_footprint = operator._mem_summary['stack']
                if int(evaluate(stack_footprint, **
                                at_args)) > options['stack_limit']:
                    continue
            except TypeError:
                warning("couldn't determine stack size; skipping run %s" %
                        str(i))
                continue
            except AttributeError:
                assert stack_footprint == 0

            # Run the Operator
            operator.cfunction(*list(at_args.values()))
            elapsed = operator._profiler.timer.total

            timings.setdefault(nt, OrderedDict()).setdefault(n,
                                                             {})[bs] = elapsed
            log("run <%s> took %f (s) in %d timesteps" %
                (','.join('%s=%s' % i for i in run), elapsed, timesteps))

            # Prepare for the next autotuning run
            update_time_bounds(stepper, at_args, timesteps, mode)

            # Reset profiling timers
            operator._profiler.timer.reset()

    # The best variant is the one that, for a given number of threads, had the
    # minimum turnaround time
    try:
        runs = 0
        mapper = {}
        for k, v in timings.items():
            for i in v.values():
                runs += len(i)
                record = mapper.setdefault(k, Record())
                record.add(min(i, key=i.get), min(i.values()))
        best = min(mapper, key=mapper.get)
        best = OrderedDict(best + tuple(mapper[best].args))
        best.pop(None, None)
        log("selected <%s>" % (','.join('%s=%s' % i for i in best.items())))
    except ValueError:
        warning("couldn't perform any runs")
        return args, {}

    # Update the argument list with the tuned arguments
    args.update(best)

    # In `runtime` mode, some timesteps have been executed already, so we must
    # adjust the time range
    finalize_time_bounds(stepper, at_args, args, mode)

    # Autotuning summary
    summary = {}
    summary['runs'] = runs
    summary['tpr'] = timesteps  # tpr -> timesteps per run
    summary['tuned'] = dict(best)

    return args, summary
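
A simplified standalone sketch of the final selection step above (it ignores the Record bookkeeping): for each thread count keep the fastest block shape, then pick the overall fastest combination. The nested layout mirrors timings[nthreads][tree_index][blockshape]:

# Simplified sketch of picking the best (nthreads, blockshape) variant.
timings = {
    (('nthreads', 4),): {0: {(('x0_blk0_size', 8),): 1.9,
                             (('x0_blk0_size', 16),): 1.2}},
    (('nthreads', 8),): {0: {(('x0_blk0_size', 8),): 1.1}},
}

best_time, best_args = min(
    (min(by_shape.values()), nt + min(by_shape, key=by_shape.get))
    for nt, by_tree in timings.items()
    for by_shape in by_tree.values()
)
print(dict(best_args), best_time)  # {'nthreads': 8, 'x0_blk0_size': 8} 1.1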
Code example #42
0
class ParameterDict(object):
    """A dictionary managing a set of parameters.

    Parameters
    ----------
    prefix : str, default ``''``
        The prefix to be prepended to all Parameters' names created by this dict.
    shared : ParameterDict or None
        If not ``None``, when this dict's :py:meth:`get` method creates a new parameter, it
        will first try to retrieve it from the "shared" dict. Usually used for sharing
        parameters with another Block.
    """
    def __init__(self, prefix='', shared=None):
        self._prefix = prefix
        self._params = OrderedDict()
        self._shared = shared

    def __repr__(self):
        s = '{name}(\n{content}\n)'
        name = self._prefix + ' ' if self._prefix else ''
        return s.format(name=name,
                        content='\n'.join([
                            _indent('  {0}'.format(v), 2)
                            for v in self.values()
                        ]))

    def __getitem__(self, key):
        return self._params[key]

    def __iter__(self):
        return iter(self._params)

    def items(self):
        return self._params.items()

    def keys(self):
        return self._params.keys()

    def values(self):
        return self._params.values()

    @property
    def prefix(self):
        """Prefix of this dict. It will be prepended to :py:class:`Parameter`s' name created
        with :py:func:`get`."""
        return self._prefix

    def _get_impl(self, name):
        if name in self._params:
            return self._params[name]
        if self._shared is not None and name in self._shared._params:
            self._params[name] = self._shared._params[name]
            return self._shared._params[name]
        return None

    def get(self, name, **kwargs):
        """Retrieves a :py:class:`Parameter` with name ``self.prefix+name``. If not found,
        :py:func:`get` will first try to retrieve it from "shared" dict. If still not
        found, :py:func:`get` will create a new :py:class:`Parameter` with key-word arguments and
        insert it into self.

        Parameters
        ----------
        name : str
            Name of the desired Parameter. It will be prepended with this dictionary's
            prefix.
        **kwargs : dict
            The rest of key-word arguments for the created :py:class:`Parameter`.

        Returns
        -------
        Parameter
            The created or retrieved :py:class:`Parameter`.
        """
        name = self.prefix + name
        param = self._get_impl(name)
        if param is None:  # pylint: disable=too-many-nested-blocks
            param = Parameter(name, **kwargs)
            self._params[name] = param
        else:
            for k, v in kwargs.items():
                if hasattr(param, k) and getattr(param, k) is not None:
                    existing = getattr(param, k)
                    if k == 'shape' and len(v) == len(existing):
                        inferred_shape = []
                        matched = True
                        for dim1, dim2 in zip(v, existing):
                            if dim1 != dim2 and dim1 > 0 and dim2 > 0:
                                matched = False
                                break
                            elif dim1 == dim2:
                                inferred_shape.append(dim1)
                            elif dim1 in (
                                    0, -1
                            ):  # -1 means unknown dim size in np_shape mode
                                inferred_shape.append(dim2)
                            else:
                                inferred_shape.append(dim1)

                        if matched:
                            param._shape = tuple(inferred_shape)
                            continue
                    elif k == 'dtype' and np.dtype(v) == np.dtype(existing):
                        continue

                    assert v is None or v == existing, \
                        "Cannot retrieve Parameter '%s' because desired attribute " \
                        "does not match with stored for attribute '%s': " \
                        "desired '%s' vs stored '%s'."%(
                            name, k, str(v), str(getattr(param, k)))
                else:
                    setattr(param, k, v)
        return param

    def get_constant(self, name, value=None):
        """Retrieves a :py:class:`.Constant` with name ``self.prefix+name``. If not found,
        :py:func:`get` will first try to retrieve it from "shared" dict. If still not
        found, :py:func:`get` will create a new :py:class:`.Constant` with key-word
        arguments and insert it into self.

        Parameters
        ----------
        name : str
            Name of the desired Constant. It will be prepended with this dictionary's
            prefix.
        value : array-like
            Initial value of constant.

        Returns
        -------
        :py:class:`.Constant`
            The created or retrieved :py:class:`.Constant`.
        """
        name = self.prefix + name
        param = self._get_impl(name)
        if param is None:
            if value is None:
                raise KeyError("No constant named '{}'. Please specify value " \
                               "if you want to create a new constant.".format(
                                   name))
            param = Constant(name, value)
            self._params[name] = param
        elif value is not None:
            assert isinstance(param, Constant), \
                "Parameter '{}' already exists but it is not a constant.".format(
                    name)
            if isinstance(value, ndarray.NDArray):
                value = value.asnumpy()
            assert param.shape == value.shape and \
                (param.value.asnumpy() == value).all(), \
                "Constant '{}' already exists but it's value doesn't match new " \
                "value".format(name)
        return param

    def update(self, other):
        """Copies all Parameters in ``other`` to self."""
        for k, v in other.items():
            if k in self._params:
                assert self._params[k] is v, \
                    "Cannot update self with other because they have different " \
                    "Parameters with the same name '%s'"%k

        for k, v in other.items():
            self._params[k] = v

    def initialize(self,
                   init=initializer.Uniform(),
                   ctx=None,
                   verbose=False,
                   force_reinit=False):
        """Initializes all Parameters managed by this dictionary to be used for :py:class:`NDArray`
        API. It has no effect when using :py:class:`Symbol` API.

        Parameters
        ----------
        init : Initializer
            Global default Initializer to be used when :py:meth:`Parameter.init` is ``None``.
            Otherwise, :py:meth:`Parameter.init` takes precedence.
        ctx : Context or list of Context
            Keeps a copy of Parameters on one or many context(s).
        verbose : bool, default False
            Whether to verbosely print out details on initialization.
        force_reinit : bool, default False
            Whether to force re-initialization if parameter is already initialized.
        """
        if verbose:
            init.set_verbosity(verbose=verbose)
        for _, v in self.items():
            v.initialize(None, ctx, init, force_reinit=force_reinit)

    def zero_grad(self):
        """Sets all Parameters' gradient buffer to 0."""
        # collect gradient arrays for each ctx
        arrays = defaultdict(list)
        for p in self.values():
            if p.grad_req == 'null' or p._grad is None:
                continue
            for g in p.list_grad():
                if g.stype == 'row_sparse':
                    mx.ndarray.zeros_like(g, out=g)
                else:
                    arrays[g.context].append(g)

        if len(arrays) == 0:
            return

        for arr in arrays.values():
            mx.nd.reset_arrays(*arr, num_arrays=len(arr))

    def reset_ctx(self, ctx):
        """Re-assign all Parameters to other contexts.

        Parameters
        ----------
        ctx : Context or list of Context, default :py:meth:`context.current_context()`.
            Assign Parameter to given context. If ctx is a list of Context, a
            copy will be made for each context.
        """
        for i in self.values():
            i.reset_ctx(ctx)

    def list_ctx(self):
        """Returns a list of all the contexts on which the underlying Parameters
        are initialized."""
        s = set()
        for i in self.values():
            s.update(i.list_ctx())
        return list(s)

    def setattr(self, name, value):
        """Set an attribute to a new value for all Parameters.

        For example, set grad_req to null if you don't need gradient w.r.t a
        model's Parameters::

            model.collect_params().setattr('grad_req', 'null')

        or change the learning rate multiplier::

            model.collect_params().setattr('lr_mult', 0.5)

        Parameters
        ----------
        name : str
            Name of the attribute.
        value : valid type for attribute name
            The new value for the attribute.
        """
        for i in self.values():
            setattr(i, name, value)

    def save(self, filename, strip_prefix=''):
        """Save parameters to file.

        Parameters
        ----------
        filename : str
            Path to parameter file.
        strip_prefix : str, default ''
            Strip prefix from parameter names before saving.
        """
        arg_dict = {}
        for param in self.values():
            weight = param._reduce()
            if not param.name.startswith(strip_prefix):
                raise ValueError(
                    "Prefix '%s' is to be stripped before saving, but Parameter's "
                    "name '%s' does not start with '%s'. "
                    "This may be because your Block shares parameters with other "
                    "Blocks, or you forgot to use 'with name_scope()' when creating "
                    "child blocks. For more info on naming, please see "
                    "https://mxnet.apache.org/api/python/docs/tutorials/packages/gluon/naming.html"
                    % (strip_prefix, param.name, strip_prefix))
            arg_dict[param.name[len(strip_prefix):]] = weight
        ndarray.save(filename, arg_dict)

    def load(self,
             filename,
             ctx=None,
             allow_missing=False,
             ignore_extra=False,
             restore_prefix='',
             cast_dtype=False,
             dtype_source="current"):
        """Load parameters from file.

        Parameters
        ----------
        filename : str
            Path to parameter file.
        ctx : Context or list of Context
            Context(s) initialize loaded parameters on.
        allow_missing : bool, default False
            Whether to silently skip loading parameters not represented in the file.
        ignore_extra : bool, default False
            Whether to silently ignore parameters from the file that are not
            present in this ParameterDict.
        restore_prefix : str, default ''
            prepend prefix to names of stored parameters before loading.
        cast_dtype : bool, default False
            Cast the data type of the parameter
        dtype_source : str, default 'current'
            must be in {'current', 'saved'}
            Only valid if cast_dtype=True, specify the source of the dtype for casting
            the parameters
        """
        if restore_prefix:
            for name in self.keys():
                assert name.startswith(restore_prefix), \
                    "restore_prefix is '%s' but Parameters name '%s' does not start " \
                    "with '%s'"%(restore_prefix, name, restore_prefix)
        ndarray_load = ndarray.load(filename)
        self.load_dict(ndarray_load, ctx, allow_missing, ignore_extra,
                       restore_prefix, filename, cast_dtype, dtype_source)

    def load_dict(self,
                  param_dict,
                  ctx=None,
                  allow_missing=False,
                  ignore_extra=False,
                  restore_prefix='',
                  filename=None,
                  cast_dtype=False,
                  dtype_source="current"):
        """Load parameters from dict

        Parameters
        ----------
        param_dict : dict
            Dictionary containing model parameters, with names prefixed by arg: or aux:
        ctx : Context or list of Context
            Context(s) initialize loaded parameters on.
        allow_missing : bool, default False
            Whether to silently skip loading parameters not represented in the file.
        ignore_extra : bool, default False
            Whether to silently ignore parameters from the file that are not
            present in this ParameterDict.
        restore_prefix : str, default ''
            prepend prefix to names of stored parameters before loading
        filename : str, default None
        cast_dtype : bool, default False
            Cast the data type of the NDArray loaded from the checkpoint to the dtype
            provided by the Parameter if any
        """
        lprefix = len(restore_prefix)
        loaded = [(k[4:] if k.startswith('arg:') or k.startswith('aux:') else k, v) \
                  for k, v in param_dict.items()] if isinstance(param_dict, dict) else param_dict
        arg_dict = {restore_prefix + k: v for k, v in loaded}
        error_str = "file: %s" % (filename) if filename else "param_dict"
        if not allow_missing:
            for name in self.keys():
                assert name in arg_dict, \
                    "Parameter '%s' is missing in %s, which contains parameters: %s. " \
                    "Please make sure source and target networks have the same prefix."%(
                        name[lprefix:], error_str, _brief_print_list(arg_dict.keys()))
        for name in arg_dict:
            if name not in self._params:
                assert ignore_extra, \
                    "Parameter '%s' loaded from %s is not present in ParameterDict, " \
                    "choices are: %s. Set ignore_extra to True to ignore. " \
                    "Please make sure source and target networks have the same prefix."%(
                        name[lprefix:], error_str, _brief_print_list(self._params.keys()))
                continue
            self[name]._load_init(arg_dict[name],
                                  ctx,
                                  cast_dtype=cast_dtype,
                                  dtype_source=dtype_source)
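
A minimal usage sketch, assuming MXNet 1.x with the Gluon API is installed (the class above mirrors mxnet.gluon.ParameterDict):

# Minimal usage sketch; requires mxnet 1.x.
from mxnet import init
from mxnet.gluon import ParameterDict

params = ParameterDict(prefix='dense0_')
w = params.get('weight', shape=(10, 5))  # created on first call
same = params.get('weight')              # retrieved on later calls
assert w is same

params.initialize(init=init.Xavier())  # allocate and initialize NDArrays
params.save('dense0.params')           # saved under the name 'dense0_weight'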
Code example #43
0
File: huarongdao.py Project: wmh123456789/pygame
class Logic:
    def __init__(self, shape=4):
        self.shape = int(shape) if shape > 2 else 4  # board shape
        self.tiles = OrderedDict()  # tile data
        self.stepCnt = 0
        self.neighbors = [  # direction vectors
            [1, 0],  # down
            [-1, 0],  # up
            [0, 1],  # right
            [0, -1],  # left
        ]
        self.click_dict = {'x': {}, 'y': {}}  # maps mouse click coordinates to grid indexes
        self.init_load()  # initial load

    def __str__(self):
        game_str = 'Game after {} steps \n'.format(self.stepCnt)
        for row in range(self.shape):
            line_str = ''
            for col in range(self.shape):
                line_str += '{:02} '.format(self.tiles[(row, col)])
            game_str += line_str + '\n'
        return game_str

    def init_load(self):
        count = 1
        # generate the solved sequence
        for x in range(self.shape):
            for y in range(self.shape):
                mark = tuple([x, y])
                self.tiles[mark] = count
                count += 1
        self.tiles[mark] = 0
        self.empty = mark
        self.stepCnt = 0


    def shuffleTiles(self, n=200):
        mark = self.empty
        for count in range(n):  # make n random moves
            neighbor = random.choice(self.neighbors)
            spot = tuple_add(mark, neighbor)

            if spot in self.tiles:
                number = self.tiles[spot]
                self.tiles[spot] = 0
                self.tiles[mark] = number
                mark = spot
        self.init_click_dict()
        self.empty = mark

    def init_click_dict(self):
        # build the mapping from click coordinates to grid indexes
        for r in range(self.shape):
            for c in range(self.shape):
                x = MARGIN * (c + 1) + c * CELL_SIZE
                x1 = x + CELL_SIZE
                click_x = tuple(range(x, x1))

                self.click_dict['x'][click_x] = c
                y = MARGIN * (r + 1) + r * CELL_SIZE
                y1 = y + CELL_SIZE
                click_y = tuple(range(y, y1))
                self.click_dict['y'][click_y] = r

    # move for click
    def move(self, mark):
        # move the clicked tile
        for neighbor in self.neighbors:
            spot = tuple_add(mark, neighbor)
            # If the clicked tile is adjacent to the empty cell, swap them
            if spot in self.tiles and self.tiles[spot] == 0:
                self.tiles[spot], self.tiles[mark] = self.tiles[mark], self.tiles[spot]
                self.empty = mark
                self.stepCnt += 1
                break

    # move for keydown
    def move2(self, neighbor):
        spot = tuple_add(neighbor, self.empty)
        if spot in self.tiles:
            self.tiles[spot], self.tiles[self.empty] = self.tiles[self.empty], self.tiles[spot]
            self.empty = spot
            self.stepCnt += 1

    def click_to_move(self, x, y):
        # move via click coordinates
        x1 = None
        for k, v in self.click_dict['x'].items():
            if x in k:
                x1 = v

        if x1 is None:
            return
        y1 = None
        for k, v in self.click_dict['y'].items():
            if y in k:
                y1 = v

        if y1 is None:
            return
        self.move((y1, x1))

    def key_to_move(self, direction):
        if direction in ['L', 'left']:
            self.move2((0, 1))
        elif direction in ['R', 'right']:
            self.move2((0, -1))
        elif direction in ['U', 'up']:
            self.move2((1, 0))
        elif direction in ['D', 'down']:
            self.move2((-1, 0))
        else:
            print("Invalid direction: {}".format(direction))

    def is_win(self):
        # win check: the empty cell must be in the bottom-right corner
        if self.tiles[(self.shape - 1, self.shape - 1)] != 0:
            return False
        values = list(self.tiles.values())
        for index in range(len(values) - 1):
            if index + 1 != values[index]:
                return False
        return True
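
A standalone sketch of the shuffle idea in shuffleTiles above: instead of permuting tiles at random (which can yield unsolvable boards), the empty cell takes a random walk, so the board stays reachable from the solved state:

# Standalone sketch: shuffle by random-walking the empty cell.
import random

shape = 4
tiles = {(r, c): r * shape + c + 1 for r in range(shape) for c in range(shape)}
empty = (shape - 1, shape - 1)
tiles[empty] = 0

for _ in range(200):
    dr, dc = random.choice([(1, 0), (-1, 0), (0, 1), (0, -1)])
    spot = (empty[0] + dr, empty[1] + dc)
    if spot in tiles:
        tiles[empty], tiles[spot] = tiles[spot], 0
        empty = spot

print(tiles[empty] == 0)  # True: the blank followed the walk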
Code example #44
0
File: siamesetracker.py Project: harsul/SiamYolact
class SiameseTracker:
    def __init__(self, maxDisappeared=3):
        # initialize the next unique object ID along with two ordered
        # dictionaries used to keep track of mapping a given object
        # ID to its centroid and number of consecutive frames it has
        # been marked as "disappeared", respectively
        self.nextObjectID = 0
        self.objects = OrderedDict()
        self.disappeared = OrderedDict()

        # store the number of maximum consecutive frames a given
        # object is allowed to be marked as "disappeared" until we
        # need to deregister the object from tracking
        self.maxDisappeared = maxDisappeared

    def register(self, centroid):
        # when registering an object we use the next available object
        # ID to store the centroid
        self.objects[self.nextObjectID] = centroid
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1

    def deregister(self, objectID):
        # to deregister an object ID we delete the object ID from
        # both of our respective dictionaries
        del self.objects[objectID]
        del self.disappeared[objectID]

    def update(self, rects):
        # check to see if the list of input bounding box rectangles
        # is empty
        if len(rects) == 0:
            # loop over any existing tracked objects and mark them
            # as disappeared
            # iterate over a copy of the keys since deregister() mutates
            # the dictionary
            for objectID in list(self.disappeared.keys()):
                self.disappeared[objectID] += 1

                # if we have reached a maximum number of consecutive
                # frames where a given object has been marked as
                # missing, deregister it
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)

            # return early as there are no centroids or tracking info
            # to update
            return self.objects

        # initialize an array of input boxes for the current frame; despite
        # the variable name, this tracker matches on the full
        # (startX, startY, endX, endY) boxes rather than on 2-D centroids
        inputCentroids = np.zeros((len(rects), 4), dtype="int")

        # loop over the bounding box rectangles
        for (i, (startX, startY, endX, endY)) in enumerate(rects):
            inputCentroids[i] = (startX, startY, endX, endY)

        # if we are currently not tracking any objects take the input
        # centroids and register each of them
        if len(self.objects) == 0:
            for i in range(0, len(inputCentroids)):
                self.register(inputCentroids[i])

        # otherwise, we are currently tracking objects so we need to
        # try to match the input centroids to existing object
        # centroids
        else:
            # grab the set of object IDs and corresponding centroids
            objectIDs = list(self.objects.keys())
            objectCentroids = list(self.objects.values())

            # compute the distance between each pair of object
            # centroids and input centroids, respectively -- our
            # goal will be to match an input centroid to an existing
            # object centroid
            D = dist.cdist(np.array(objectCentroids), inputCentroids)

            # in order to perform this matching we must (1) find the
            # smallest value in each row and then (2) sort the row
            # indexes based on their minimum values so that the row
            # with the smallest value is at the *front* of the index
            # list
            rows = D.min(axis=1).argsort()

            # next, we perform a similar process on the columns by
            # finding the smallest value in each column and then
            # sorting using the previously computed row index list
            cols = D.argmin(axis=1)[rows]

            # in order to determine if we need to update, register,
            # or deregister an object we need to keep track of which
            # of the rows and column indexes we have already examined
            usedRows = set()
            usedCols = set()

            # loop over the combination of the (row, column) index
            # tuples
            for (row, col) in zip(rows, cols):
                # if we have already examined either the row or
                # column value before, ignore it
                # val
                if row in usedRows or col in usedCols:
                    continue

                # otherwise, grab the object ID for the current row,
                # set its new centroid, and reset the disappeared
                # counter
                objectID = objectIDs[row]
                self.objects[objectID] = inputCentroids[col]
                self.disappeared[objectID] = 0

                # indicate that we have examined each of the row and
                # column indexes, respectively
                usedRows.add(row)
                usedCols.add(col)

            # compute both the row and column index we have NOT yet
            # examined
            unusedRows = set(range(0, D.shape[0])).difference(usedRows)
            unusedCols = set(range(0, D.shape[1])).difference(usedCols)

            # in the event that the number of object centroids is
            # equal or greater than the number of input centroids
            # we need to check and see if some of these objects have
            # potentially disappeared
            if D.shape[0] >= D.shape[1]:
                # loop over the unused row indexes
                for row in unusedRows:
                    # grab the object ID for the corresponding row
                    # index and increment the disappeared counter
                    objectID = objectIDs[row]
                    self.disappeared[objectID] += 1

                    # check to see if the number of consecutive
                    # frames the object has been marked "disappeared"
                    # for warrants registering the object
                    if self.disappeared[objectID] > self.maxDisappeared:
                        self.deregister(objectID)

            # otherwise, if the number of input centroids is greater
            # than the number of existing object centroids we need to
            # register each new input centroid as a trackable object
            else:
                for col in unusedCols:
                    self.register(inputCentroids[col])

        # return the set of trackable objects
        return self.objects
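
A minimal usage sketch of the tracker above: feed it per-frame bounding boxes and read back stable object IDs (numpy and scipy.spatial.distance, imported as dist by the surrounding module, are required):

# Minimal usage sketch of SiameseTracker.
tracker = SiameseTracker(maxDisappeared=3)

frame1 = [(10, 10, 50, 50), (100, 100, 140, 140)]
frame2 = [(12, 11, 52, 51)]  # first box moved slightly; second one vanished

print(tracker.update(frame1))  # {0: array([...]), 1: array([...])}
print(tracker.update(frame2))  # ID 0 updated; ID 1 marked as disappeared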
Code example #45
0
class RecipeContainer(Observable, Configurable, Validatable):
    """Base class for organizing pieces of a FitRecipe.

    RecipeContainers are hierarchical organizations of Parameters and other
    RecipeContainers. This class provides attribute-access to these contained
    objects.  Parameters and other RecipeContainers can be found within the
    hierarchy with the _locateManagedObject method.

    A RecipeContainer can manage dictionaries that store various objects.
    These dictionaries can be added to the RecipeContainer using the _manage
    method. RecipeContainer methods that add, remove or retrieve objects will
    work with any managed dictionary. This makes it easy to add new types of
    objects to be contained by a RecipeContainer. By default, the
    RecipeContainer is configured to manage an OrderedDict of Parameter
    objects.

    RecipeContainer is an Observable, and observes its managed objects and
    Parameters. This allows hierarchical calculation elements, such as
    ProfileGenerator, to detect changes in Parameters and Restraints on which
    it may depend.

    Attributes
    name            --  A name for this RecipeContainer. Names should be unique
                        within a RecipeContainer and should be valid attribute
                        names.
    _parameters     --  A managed OrderedDict of contained Parameters.
    __managed       --  A list of managed dictionaries. This is used for
                        attribute access, addition and removal.
    _configobjs     --  A set of configurable objects that must know of
                        configuration changes within this object.

    Properties
    names           --  Variable names (read only). See getNames.
    values          --  Variable values (read only). See getValues.
    """

    names = property(lambda self: self.getNames())
    values = property(lambda self: self.getValues())

    def __init__(self, name):
        Observable.__init__(self)
        Configurable.__init__(self)
        validateName(name)
        self.name = name
        self._parameters = OrderedDict()

        self.__managed = []
        self._manage(self._parameters)

        return

    def _manage(self, d):
        """Manage a dictionary of objects.

        This adds the dictionary to the __managed list. Dictionaries in
        __managed are used for attribute access, addition, and removal.
        """
        self.__managed.append(d)
        return

    def _iterManaged(self):
        """Get iterator over managed objects."""
        return chain(*(d.values() for d in self.__managed))

    def iterPars(self, pattern="", recurse=True):
        """Iterate over the Parameters contained in this object.

        Parameters
        ----------
        pattern : str
            Iterate over parameters with names matching this regular
            expression (all parameters by default).
        recurse : bool
            Recurse into managed objects when True (default).
        """
        regexp = re.compile(pattern)
        for par in list(self._parameters.values()):
            if regexp.search(par.name):
                yield par
        if not recurse:
            return
        # Iterate over objects within the managed dictionaries.
        managed = self.__managed[:]
        managed.remove(self._parameters)
        for m in managed:
            for obj in m.values():
                if hasattr(obj, "iterPars"):
                    for par in obj.iterPars(pattern=pattern):
                        yield par
        return

    def __iter__(self):
        """Iterate over top-level parameters."""
        return iter(self._parameters.values())

    def __len__(self):
        """Get number of top-level parameters."""
        return len(self._parameters)

    def __getitem__(self, idx):
        """Get top-level parameters by index."""
        return list(self._parameters.values())[idx]

    def __getattr__(self, name):
        """Gives access to the contained objects as attributes."""
        arg = self.get(name)
        if arg is None:
            raise AttributeError(name)
        return arg

    # Ensure there is no __dir__ override in the base class.
    assert (getattr(Observable, '__dir__', None) is getattr(
        Configurable, '__dir__', None) is getattr(Validatable, '__dir__', None)
            is getattr(object, '__dir__', None))

    def __dir__(self):
        "Return sorted list of attributes for this object."
        rv = set(dir(type(self)))
        rv.update(self.__dict__)
        # self.get looks up items in all managed dictionaries.
        # Add keys from each dictionary in self.__managed.
        rv.update(*self.__managed)
        rv = sorted(rv)
        return rv

    # Needed by __setattr__
    _parameters = OrderedDict()
    __managed = []

    def __setattr__(self, name, value):
        """Parameter access and object checking."""
        if name in self._parameters:
            par = self._parameters[name]
            if isinstance(value, Parameter):
                par.value = value.value
            else:
                par.value = value
            return

        m = self.get(name)
        if m is not None:
            raise AttributeError("Cannot set '%s'" % name)

        super(RecipeContainer, self).__setattr__(name, value)
        return

    def __delattr__(self, name):
        """Delete parameters with del.

        This does not allow deletion of non-parameters, as this may require
        configuration changes that are not yet handled in a general way.
        """
        if name in self._parameters:
            self._removeParameter(self._parameters[name])
            return

        m = self.get(name)
        if m is not None:
            raise AttributeError("Cannot delete '%s'" % name)

        super(RecipeContainer, self).__delattr__(name)
        return

    def get(self, name, default=None):
        """Get a managed object."""
        for d in self.__managed:
            arg = d.get(name)
            if arg is not None:
                return arg

        return default

    def getNames(self):
        """Get the names of managed parameters."""
        return [p.name for p in self._parameters.values()]

    def getValues(self):
        """Get the values of managed parameters."""
        return [p.value for p in self._parameters.values()]

    def _addObject(self, obj, d, check=True):
        """Add an object to a managed dictionary.

        obj     --  The object to be stored.
        d       --  The managed dictionary to store the object in.
        check   --  If True (default), a ValueError is raised if an object of
                    the given name already exists.

        Raises ValueError if the object has no name.
        Raises ValueError if the object has the same name as some other managed
        object.
        """

        # Check name
        if not obj.name:
            message = "%s has no name" % obj.__class__.__name__
            raise ValueError(message)

        # Check for extant object in d with same name
        oldobj = d.get(obj.name)
        if check and oldobj is not None:
            message = "%s with name '%s' already exists"%\
                    (obj.__class__.__name__, obj.name)
            raise ValueError(message)

        # Check for object with same name in other dictionary.
        if oldobj is None and self.get(obj.name) is not None:
            message = "Non-%s with name '%s' already exists"%\
                    (obj.__class__.__name__, obj.name)
            raise ValueError(message)

        # Detach the old object, if there is one
        if oldobj is not None:
            oldobj.removeObserver(self._flush)

        # Add the object
        d[obj.name] = obj

        # Observe the object
        obj.addObserver(self._flush)

        # Store this as a configurable object
        self._storeConfigurable(obj)
        return

    def _removeObject(self, obj, d):
        """Remove an object from a managed dictionary.

        Raises ValueError if obj is not part of the dictionary.
        """
        if obj not in d.values():
            m = "'%s' is not part of the %s" % (obj, self.__class__.__name__)
            raise ValueError(m)

        del d[obj.name]
        obj.removeObserver(self._flush)

        return

    def _locateManagedObject(self, obj):
        """Find the location a managed object within the hierarchy.

        obj     --  The object to find.

        Returns a list of objects. The first member of the list is this object,
        and each subsequent member is a sub-object of the previous one.  The
        last entry in the list is obj. If obj cannot be found, the list is
        empty.
        """
        loc = [self]

        # This handles the case that an object is asked to locate itself.
        if obj is self:
            return loc

        for m in self._iterManaged():

            # Check locally for the object
            if m is obj:
                loc.append(obj)
                return loc

            # Check within managed objects
            if hasattr(m, "_locateManagedObject"):

                subloc = m._locateManagedObject(obj)
                if subloc:
                    return loc + subloc

        return []

    def _flush(self, other):
        """Invalidate cached state.

        This will force any observer to invalidate its state. By default this
        does nothing.
        """
        self.notify(other)
        return

    def _validate(self):
        """Validate my state.

        This validates that contained Parameters and managed objects are valid.

        Raises AttributeError if validation fails.
        """
        iterable = chain(self.__iter__(), self._iterManaged())
        self._validateOthers(iterable)
        return
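A minimal usage sketch of the container above, assuming a Parameter class
that carries name/value attributes and supports the Observable interface;
the Recipe subclass, the addParameter helper and the Parameter signature
shown here are illustrative only.

class Recipe(RecipeContainer):
    def addParameter(self, par):
        # _addObject stores the Parameter in the managed dictionary and
        # subscribes this container to its change notifications
        self._addObject(par, self._parameters)

rec = Recipe("fit")
rec.addParameter(Parameter("scale", 1.0))  # hypothetical Parameter signature
rec.scale = 2.5                   # __setattr__ routes this to the Parameter
print(rec.names, rec.values)      # ['scale'] [2.5]
print([p.name for p in rec.iterPars()])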
Code example #46
0
class Market:
    instance = None

    def __init__(self):

        # Init recently used module storage
        serviceMarketRecentlyUsedModules = {
            "pyfaMarketRecentlyUsedModules": []
        }

        self.serviceMarketRecentlyUsedModules = SettingsProvider.getInstance(
        ).getSettings("pyfaMarketRecentlyUsedModules",
                      serviceMarketRecentlyUsedModules)

        # Thread which handles search
        self.searchWorkerThread = SearchWorkerThread()
        self.searchWorkerThread.daemon = True
        self.searchWorkerThread.start()

        # Ship browser helper thread
        self.shipBrowserWorkerThread = ShipBrowserWorkerThread()
        self.shipBrowserWorkerThread.daemon = True
        self.shipBrowserWorkerThread.start()

        # Items' group overrides
        self.customGroups = set()
        # Limited edition ships
        self.les_grp = types_Group()
        self.les_grp.ID = -1
        self.les_grp.name = "Limited Issue Ships"
        self.les_grp.displayName = _t("Limited Issue Ships")
        self.les_grp.published = True
        ships = self.getCategory("Ship")
        self.les_grp.category = ships
        self.les_grp.categoryID = ships.ID
        self.les_grp.description = ""
        self.les_grp.icon = None
        self.ITEMS_FORCEGROUP = {
            "Capsule": self.getGroup("Shuttle"),
            "Opux Luxury Yacht": self.les_grp,  # One was a wedding present at CCP Fanfest, another was hijacked from an ISD member during an event
            "Silver Magnate": self.les_grp,  # Amarr Championship prize
            "Gold Magnate": self.les_grp,  # Amarr Championship prize
            "Armageddon Imperial Issue": self.les_grp,  # Amarr Championship prize
            "Apocalypse Imperial Issue": self.les_grp,  # Amarr Championship prize
            "Guardian-Vexor": self.les_grp,  # Illegal reward for the Gallente Frontier Tour Lines event arc
            "Megathron Federate Issue": self.les_grp,  # Reward during Crielere event
            "Raven State Issue": self.les_grp,  # AT4 prize
            "Tempest Tribal Issue": self.les_grp,  # AT4 prize
            "Apotheosis": self.les_grp,  # 5th EVE anniversary present
            "Zephyr": self.les_grp,  # 2010 new year gift
            "Primae": self.les_grp,  # Promotion of planetary interaction
            "Council Diplomatic Shuttle": self.les_grp,  # CSM X celebration
            "Freki": self.les_grp,  # AT7 prize
            "Mimir": self.les_grp,  # AT7 prize
            "Utu": self.les_grp,  # AT8 prize
            "Adrestia": self.les_grp,  # AT8 prize
            "Echelon": self.les_grp,  # 2011 new year gift
            "Malice": self.les_grp,  # AT9 prize
            "Vangel": self.les_grp,  # AT9 prize
            "Cambion": self.les_grp,  # AT10 prize
            "Etana": self.les_grp,  # AT10 prize
            "Chremoas": self.les_grp,  # AT11 prize
            "Moracha": self.les_grp,  # AT11 prize
            "Stratios Emergency Responder": self.les_grp,  # Issued for Somer Blink lottery
            "Miasmos Quafe Ultra Edition": self.les_grp,  # Gift to people who purchased the FF HD stream
            "InterBus Shuttle": self.les_grp,
            "Leopard": self.les_grp,  # 2013 new year gift
            "Whiptail": self.les_grp,  # AT12 prize
            "Chameleon": self.les_grp,  # AT12 prize
            "Victorieux Luxury Yacht": self.les_grp,  # Worlds Collide prize
            "Imp": self.les_grp,  # AT13 prize
            "Fiend": self.les_grp,  # AT13 prize
            "Caedes": self.les_grp,  # AT14 prize
            "Rabisu": self.les_grp,  # AT14 prize
            "Victor": self.les_grp,  # AT15 prize
            "Virtuoso": self.les_grp,  # AT15 prize
            "Hydra": self.les_grp,  # AT16 prize
            "Tiamat": self.les_grp,  # AT16 prize
        }

        self.ITEMS_FORCEGROUP_R = self.__makeRevDict(self.ITEMS_FORCEGROUP)
        for grp, itemNames in self.ITEMS_FORCEGROUP_R.items():
            grp.addItems = list(self.getItem(i) for i in itemNames)
        self.customGroups.add(self.les_grp)

        # List of items which are forcibly published or hidden
        self.ITEMS_FORCEPUBLISHED = {
            "Data Subverter I": False,  # Not used in EVE, probably will appear with Dust link
            "QA Cross Protocol Analyzer": False,  # QA modules used by CCP internally
            "QA Damage Module": False,
            "QA ECCM": False,
            "QA Immunity Module": False,
            "QA Multiship Module - 10 Players": False,
            "QA Multiship Module - 20 Players": False,
            "QA Multiship Module - 40 Players": False,
            "QA Multiship Module - 5 Players": False,
            "QA Remote Armor Repair System - 5 Players": False,
            "QA Shield Transporter - 5 Players": False,
            "Goru's Shuttle": False,
            "Guristas Shuttle": False,
            "Mobile Decoy Unit": False,  # Seems to be a leftover test mod for deployables
            "Tournament Micro Jump Unit": False,  # Normally seen only in tournament arenas
        }

        # do not publish ships that we convert
        for name in conversions.packs['skinnedShips']:
            self.ITEMS_FORCEPUBLISHED[name] = False

        if config.debug:
            # Publish Tactical Dessy Modes if in debug
            # Cannot use GROUPS_FORCEPUBLISHED as this does not force items
            # within group to be published, but rather for the group itself
            # to show up on ship list
            group = self.getGroup("Ship Modifiers", eager="items")
            for item in group.items:
                self.ITEMS_FORCEPUBLISHED[item.name] = True

        # List of groups which are forcibly published or hidden
        self.GROUPS_FORCEPUBLISHED = {
            "Prototype Exploration Ship": False
        }  # We moved the only ship from this group to another group anyway

        # Dictionary of items with forced meta groups, uses following format:
        # Item name: (metagroup name, parent type name)
        self.ITEMS_FORCEDMETAGROUP = {
            "'Habitat' Miner I": ("Storyline", "Miner I"),
            "'Wild' Miner I": ("Storyline", "Miner I"),
            "Khanid Navy Torpedo Launcher": ("Faction", "Torpedo Launcher I"),
            "Dread Guristas Standup Variable Spectrum ECM":
                ("Structure Faction", "Standup Variable Spectrum ECM I"),
            "Dark Blood Standup Heavy Energy Neutralizer":
                ("Structure Faction", "Standup Heavy Energy Neutralizer I"),
        }
        # Parent type name: set(item names)
        self.ITEMS_FORCEDMETAGROUP_R = {}
        for item, value in list(self.ITEMS_FORCEDMETAGROUP.items()):
            parent = value[1]
            if parent not in self.ITEMS_FORCEDMETAGROUP_R:
                self.ITEMS_FORCEDMETAGROUP_R[parent] = set()
            self.ITEMS_FORCEDMETAGROUP_R[parent].add(item)
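        # e.g. {"Miner I": {"'Habitat' Miner I", "'Wild' Miner I"}, ...}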
        # Dictionary of items with forced market group (service assumes they have no
        # market group assigned in db, otherwise they'll appear in both original and forced groups)
        self.ITEMS_FORCEDMARKETGROUP = {
            # Implants & Boosters > Booster > Cerebral Accelerators
            "Advanced Cerebral Accelerator": 2487,
            # Drones > Combat Drones > Light Scout Drones
            "Civilian Hobgoblin": 837,
            # Ship Equipment > Turrets & Launchers > Missile Launchers > Light Missile Launchers
            "Civilian Light Missile Launcher": 640,
            # Ammunition & Charges > Missiles > Light Missiles > Standard Light Missiles
            "Civilian Scourge Light Missile": 920,
            # Ship Equipment > Hull & Armor > Remote Armor Repairers > Small
            "Civilian Small Remote Armor Repairer": 1059,
            # Ship Equipment > Shield > Remote Shield Boosters > Small
            "Civilian Small Remote Shield Booster": 603,
            # Implants & Boosters > Implants > Skill Hardwiring > Missile Implants > Implant Slot 06
            "Hardwiring - Zainou 'Sharpshooter' ZMX10": 1493,
            "Hardwiring - Zainou 'Sharpshooter' ZMX100": 1493,
            "Hardwiring - Zainou 'Sharpshooter' ZMX1000": 1493,
            "Hardwiring - Zainou 'Sharpshooter' ZMX11": 1493,
            "Hardwiring - Zainou 'Sharpshooter' ZMX110": 1493,
            "Hardwiring - Zainou 'Sharpshooter' ZMX1100": 1493,
            # Implants & Boosters > Booster > Cerebral Accelerators
            "Prototype Cerebral Accelerator": 2487,
            # Ship Equipment > Scanning Equipment > Scan Probe Launchers
            "Prototype Iris Probe Launcher": 712,
            # Implants & Boosters > Booster > Cerebral Accelerators
            "Standard Cerebral Accelerator": 2487,
        }

        self.ITEMS_FORCEDMARKETGROUP_R = self.__makeRevDict(
            self.ITEMS_FORCEDMARKETGROUP)

        self.FORCEDMARKETGROUP = {
            685: False,  # Ship Equipment > Electronic Warfare > ECCM
            681: False,  # Ship Equipment > Electronic Warfare > Sensor Backup Arrays
            1639: False,  # Ship Equipment > Fleet Assistance > Command Processors
            # Ship Equipment > Hull & Armor > Mutadaptive Remote Armor
            # Repairers - has hasTypes set to 1 while actually having no types
            2527: True,
        }

        # Misc definitions
        # 0 is for items w/o meta group
        self.META_MAP = OrderedDict([("faction", frozenset((4, 3, 52))),
                                     ("complex", frozenset((6, ))),
                                     ("officer", frozenset((5, )))])
        nonNormalMetas = set(chain(*self.META_MAP.values()))
        self.META_MAP["normal"] = frozenset(
            (0, *(mg.ID for mg in eos.db.getMetaGroups()
                  if mg.ID not in nonNormalMetas)))
        self.META_MAP.move_to_end("normal", last=False)
        self.META_MAP_REVERSE = {
            sv: k
            for k, v in self.META_MAP.items() for sv in v
        }
        self.META_MAP_REVERSE_GROUPED = {
            mgid: i
            for i, mgids in enumerate(self.META_MAP.values())
            for mgid in mgids
        }
        self.META_MAP_REVERSE_INDICES = self.__makeReverseMetaMapIndices()
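        # Illustration: META_MAP is ordered normal, faction, complex, officer,
        # so META_MAP_REVERSE maps meta group ID 4 to "faction" while
        # META_MAP_REVERSE_GROUPED maps it to position 1 in that ordering.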
        self.SEARCH_CATEGORIES = (
            "Drone",
            "Module",
            "Subsystem",
            "Charge",
            "Implant",
            "Deployable",
            "Fighter",
            "Structure",
            "Structure Module",
        )
        self.SEARCH_GROUPS = ("Ice Product", "Cargo Container",
                              "Secure Cargo Container",
                              "Audit Log Secure Container",
                              "Freight Container", "Jump Filaments",
                              "Triglavian Space Filaments")
        self.ROOT_MARKET_GROUPS = (
            9,  # Ship Equipment
            1111,  # Rigs
            157,  # Drones
            11,  # Ammunition & Charges
            1112,  # Subsystems
            24,  # Implants & Boosters
            404,  # Deployable Structures
            2202,  # Structure Equipment
            2203,  # Structure Modifications
            2456  # Filaments
        )
        self.SHOWN_MARKET_GROUPS = eos.db.getMarketTreeNodeIds(
            self.ROOT_MARKET_GROUPS)
        self.FIT_CATEGORIES = ['Ship']
        self.FIT_GROUPS = ['Citadel', 'Engineering Complex', 'Refinery']
        # Tell other threads that Market is at their service
        mktRdy.set()

    @classmethod
    def getInstance(cls):
        if cls.instance is None:
            cls.instance = Market()
        return cls.instance

    @staticmethod
    def __makeRevDict(orig):
        """Creates reverse dictionary"""
        rev = {}
        for item, value in list(orig.items()):
            if value not in rev:
                rev[value] = set()
            rev[value].add(item)
        return rev
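        # For example: __makeRevDict({"a": 1, "b": 1, "c": 2}) returns
        # {1: {"a", "b"}, 2: {"c"}}  (hypothetical values)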

    def __makeReverseMetaMapIndices(self):
        # Maps meta group ID to the position of its entry within META_MAP,
        # mirroring META_MAP_REVERSE_GROUPED
        return {
            mgid: i
            for i, mgids in enumerate(self.META_MAP.values())
            for mgid in mgids
        }

    @staticmethod
    def getItem(identity, *args, **kwargs):
        """Get item by its ID or name"""
        try:
            if isinstance(identity, types_Item):
                item = identity
            elif isinstance(identity, int):
                item = eos.db.getItem(identity, *args, **kwargs)
            elif isinstance(identity, str):
                # We normally look up by string when using import/export
                # features; check against conversion overrides first
                identity = conversions.all.get(identity, identity)
                item = eos.db.getItem(identity, *args, **kwargs)

            elif isinstance(identity, float):
                id_ = int(identity)
                item = eos.db.getItem(id_, *args, **kwargs)
            else:
                raise TypeError(
                    "Need Item object, integer, float or string as argument")
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            pyfalog.error("Could not get item: {0}", identity)
            raise

        return item

    @staticmethod
    def getItems(itemIDs, eager=None):
        items = eos.db.getItems(itemIDs, eager=eager)
        return items

    def getGroup(self, identity, *args, **kwargs):
        """Get group by its ID or name"""
        if isinstance(identity, types_Group):
            return identity
        elif isinstance(identity, (int, float, str)):
            if isinstance(identity, float):
                identity = int(identity)
            # Check custom groups
            for cgrp in self.customGroups:
                # During first comparison we need exact int, not float for matching
                if cgrp.ID == identity or cgrp.name == identity:
                    # Return first match
                    return cgrp
            # Return eos group if everything else returned nothing
            return eos.db.getGroup(identity, *args, **kwargs)
        else:
            raise TypeError(
                "Need Group object, integer, float or string as argument")

    @staticmethod
    def getCategory(identity, *args, **kwargs):
        """Get category by its ID or name"""
        if isinstance(identity, types_Category):
            category = identity
        elif isinstance(identity, (int, str)):
            category = eos.db.getCategory(identity, *args, **kwargs)
        elif isinstance(identity, float):
            id_ = int(identity)
            category = eos.db.getCategory(id_, *args, **kwargs)
        else:
            raise TypeError(
                "Need Category object, integer, float or string as argument")
        return category

    @staticmethod
    def getMetaGroup(identity, *args, **kwargs):
        """Get meta group by its ID or name"""
        if isinstance(identity, types_MetaGroup):
            metaGroup = identity
        elif isinstance(identity, (int, str)):
            metaGroup = eos.db.getMetaGroup(identity, *args, **kwargs)
        elif isinstance(identity, float):
            id_ = int(identity)
            metaGroup = eos.db.getMetaGroup(id_, *args, **kwargs)
        else:
            raise TypeError(
                "Need MetaGroup object, integer, float or string as argument")
        return metaGroup

    @staticmethod
    def getMarketGroup(identity, *args, **kwargs):
        """Get market group by its ID"""
        if isinstance(identity, types_MarketGroup):
            marketGroup = identity
        elif isinstance(identity, (int, float)):
            id_ = int(identity)
            marketGroup = eos.db.getMarketGroup(id_, *args, **kwargs)
        else:
            raise TypeError(
                "Need MarketGroup object, integer or float as argument")
        return marketGroup

    def getGroupByItem(self, item):
        """Get group by item"""
        if item.typeName in self.ITEMS_FORCEGROUP:
            group = self.ITEMS_FORCEGROUP[item.typeName]
        else:
            group = item.group
        return group

    def getCategoryByItem(self, item):
        """Get category by item"""
        grp = self.getGroupByItem(item)
        cat = grp.category
        return cat

    def getMetaGroupByItem(self, item):
        """Get meta group by item"""
        # Check if item is in forced metagroup map
        if item.name in self.ITEMS_FORCEDMETAGROUP:
            metaGroupName = self.ITEMS_FORCEDMETAGROUP[item.name][0]
            metaGroup = eos.db.getMetaGroup(metaGroupName)
        # If no forced meta group is provided, try to use item's
        # meta group if any
        else:
            metaGroup = item.metaGroup
        return metaGroup

    def getMetaGroupIdByItem(self, item, fallback=0):
        """Get meta group ID by item"""
        id_ = getattr(self.getMetaGroupByItem(item), "ID", fallback)
        return id_

    def getMarketGroupByItem(self, item, parentcheck=True):
        """Get market group by item, its ID or name"""
        # Check if we force market group for given item
        if item.name in self.ITEMS_FORCEDMARKETGROUP:
            mgid = self.ITEMS_FORCEDMARKETGROUP[item.name]
            if mgid in self.SHOWN_MARKET_GROUPS:
                return self.getMarketGroup(mgid)
            else:
                return None
        # Check if item itself has market group
        elif item.marketGroupID:
            if item.marketGroupID in self.SHOWN_MARKET_GROUPS:
                return item.marketGroup
            else:
                return None
        elif parentcheck:
            # If item doesn't have marketgroup, check if it has parent
            # item and use its market group
            parent = self.getParentItemByItem(item, selfparent=False)
            if parent and parent.marketGroupID in self.SHOWN_MARKET_GROUPS:
                return parent.marketGroup
            else:
                return None
        else:
            return None

    def getParentItemByItem(self, item, selfparent=True):
        """Get parent item by item"""
        parent = None
        if item.name in self.ITEMS_FORCEDMETAGROUP:
            parentName = self.ITEMS_FORCEDMETAGROUP[item.name][1]
            parent = self.getItem(parentName)
        if parent is None:
            parent = item.varParent
        # Consider self as parent if item has no parent in database
        if parent is None and selfparent is True:
            parent = item
        return parent

    def getVariationsByItems(self, items, alreadyparent=False):
        """Get item variations by item, its ID or name"""
        # Set for IDs of parent items
        parents = set()
        # Set-container for variations
        variations = set()
        variations_limiter = set()

        # if item belongs to these categories, use their group to find "variations"
        categories = ['Drone', 'Fighter', 'Implant']

        for item in items:
            if item.category.ID == 20 and item.group.ID != 303:  # Implants not Boosters
                implant_remove_list = set()
                implant_remove_list.add("Low-Grade ")
                implant_remove_list.add("Low-grade ")
                implant_remove_list.add("Mid-Grade ")
                implant_remove_list.add("Mid-grade ")
                implant_remove_list.add("High-Grade ")
                implant_remove_list.add("High-grade ")
                implant_remove_list.add("Limited ")
                implant_remove_list.add(" - Advanced")
                implant_remove_list.add(" - Basic")
                implant_remove_list.add(" - Elite")
                implant_remove_list.add(" - Improved")
                implant_remove_list.add(" - Standard")

                for implant_prefix in ("-6", "-7", "-8", "-9", "-10"):
                    for i in range(50):
                        implant_remove_list.add(implant_prefix +
                                                str("%02d" % i))

                for text_to_remove in implant_remove_list:
                    if text_to_remove in item.name:
                        variations_limiter.add(
                            item.name.replace(text_to_remove, ""))

            # Get parent item
            if alreadyparent is False:
                parent = self.getParentItemByItem(item)
            else:
                parent = item
            # Combine both in the same set
            parents.add(parent)
            # Check for overrides and add them if any
            if parent.name in self.ITEMS_FORCEDMETAGROUP_R:
                for _item in self.ITEMS_FORCEDMETAGROUP_R[parent.name]:
                    i = self.getItem(_item)
                    if i:
                        variations.add(i)
        # Add all parents to variations set
        variations.update(parents)
        # Add all variations of parents to the set
        parentids = tuple(item.ID for item in parents)
        groupids = tuple(item.group.ID for item in parents
                         if item.category.name in categories)
        variations_list = eos.db.getVariations(parentids, groupids)

        if variations_limiter:
            # Keep only variations whose names match at least one limiter
            trimmed_variations_list = [
                variation_item for variation_item in variations_list
                if any(limit in variation_item.name
                       for limit in variations_limiter)
            ]
            if trimmed_variations_list:
                variations_list = trimmed_variations_list

        # If the items are boosters, filter variations to only include
        # boosters for the same slot.
        BOOSTER_GROUP_ID = 303
        if len(items) > 0 and all(i.group.ID == BOOSTER_GROUP_ID
                                  for i in items):
            # 'boosterness' is the database's attribute name for Booster Slot
            reqSlot = next(iter(items)).getAttribute('boosterness')
            # If the item and its variation both have a marketGroupID, they
            # should match for the variation to be considered valid.
            marketGroupIDs = [
                next(filter(None, (i.marketGroupID for i in items)), None),
                None,
            ]

            def matchSlotAndMktGrpID(v):
                return (v.getAttribute('boosterness') == reqSlot
                        and v.marketGroupID in marketGroupIDs)

            variations_list = list(
                filter(matchSlotAndMktGrpID, variations_list))

        variations.update(variations_list)
        return variations

    def getGroupsByCategory(self, cat):
        """Get groups from given category"""
        groups = set(
            [grp for grp in cat.groups if self.getPublicityByGroup(grp)])

        return groups

    @staticmethod
    def getMarketGroupChildren(mg):
        """Get the children marketGroups of marketGroup."""
        children = set()
        for child in mg.children:
            children.add(child)
        return children

    def getItemsByGroup(self, group):
        """Get items assigned to group"""
        # Return only public items; also, filter out items
        # which were forcibly set to other groups
        groupItems = set(group.items)
        if hasattr(group, 'addItems'):
            groupItems.update(group.addItems)
        items = set([
            item for item in groupItems if self.getPublicityByItem(item)
            and self.getGroupByItem(item) == group
        ])
        return items

    def getItemsByMarketGroup(self, mg, vars_=True):
        """Get items in the given market group"""
        result = set()
        # Get items from eos market group
        baseitms = set(mg.items)
        # Add hardcoded items to set
        if mg.ID in self.ITEMS_FORCEDMARKETGROUP_R:
            forceditms = set(
                self.getItem(itmn)
                for itmn in self.ITEMS_FORCEDMARKETGROUP_R[mg.ID])
            baseitms.update(forceditms)
        if vars_:
            parents = set()
            for item in baseitms:
                # Add one of the base market group items to result
                result.add(item)
                parent = self.getParentItemByItem(item, selfparent=False)
                # If item has no parent, it's a base item (or at least should be)
                if parent is None:
                    parents.add(item)
            # Fetch variations only for parent items
            variations = self.getVariationsByItems(parents, alreadyparent=True)
            for variation in variations:
                # Exclude items with their own explicitly defined market groups
                if self.getMarketGroupByItem(variation,
                                             parentcheck=False) is None:
                    result.add(variation)
        else:
            result = baseitms
        # Get rid of unpublished items
        result = set(
            [item_ for item_ in result if self.getPublicityByItem(item_)])
        return result

    def marketGroupHasTypesCheck(self, mg):
        """If market group has any items, return true"""
        if mg and mg.ID in self.ITEMS_FORCEDMARKETGROUP_R:
            # This shouldn't occur normally but makes errors more mild when ITEMS_FORCEDMARKETGROUP is outdated.
            if len(mg.children) > 0 and len(mg.items) == 0:
                pyfalog.error((
                    "Market group \"{0}\" contains no items and has children. "
                    "ITEMS_FORCEDMARKETGROUP is likely outdated and will need to be "
                    "updated for {1} to display correctly.").format(
                        mg, self.ITEMS_FORCEDMARKETGROUP_R[mg.ID]))
                return False
            return True
        elif len(mg.items) > 0 and len(mg.children) == 0:
            return True
        else:
            return False

    def marketGroupValidityCheck(self, mg):
        """Check market group validity"""
        # The only known case when group can be invalid is
        # when it's declared to have types, but it doesn't contain anything
        if mg.ID in self.FORCEDMARKETGROUP:
            return self.FORCEDMARKETGROUP[mg.ID]
        if mg.hasTypes and not self.marketGroupHasTypesCheck(mg):
            return False
        else:
            return True

    def getIconByMarketGroup(self, mg):
        """Return icon associated to marketgroup"""
        if mg.iconID:
            return mg.iconID
        else:
            while mg and not mg.hasTypes:
                mg = mg.parent
            if not mg:
                return ""
            elif self.marketGroupHasTypesCheck(mg):
                # Do not request variations to make process faster
                # Pick random item and use its icon
                items = self.getItemsByMarketGroup(mg, vars_=False)
                try:
                    item = items.pop()
                except KeyError:
                    return ""

                return item.iconID if getattr(item, "icon", None) else ""
            else:
                # Recurse into child groups until one yields an icon
                for kid in self.getMarketGroupChildren(mg):
                    icon = self.getIconByMarketGroup(kid)
                    if icon:
                        return icon
                return ""

    def getPublicityByItem(self, item):
        """Return if an item is published"""
        if item.typeName in self.ITEMS_FORCEPUBLISHED:
            pub = self.ITEMS_FORCEPUBLISHED[item.typeName]
        else:
            pub = item.published
        return pub

    def getPublicityByGroup(self, group):
        """Return if an group is published"""
        if group.name in self.GROUPS_FORCEPUBLISHED:
            pub = self.GROUPS_FORCEPUBLISHED[group.name]
        else:
            pub = group.published
        return pub

    def getMarketRoot(self):
        """
        Get the root of the market tree.
        Returns a list, where each element is a tuple containing:
        the ID, the name and the icon of the group
        """
        root = set()
        for id_ in self.ROOT_MARKET_GROUPS:
            mg = self.getMarketGroup(id_)
            root.add(mg)

        return root

    def getShipRoot(self):
        cat1 = self.getCategory("Ship")
        cat2 = self.getCategory("Structure")
        root = set(
            self.getGroupsByCategory(cat1) | self.getGroupsByCategory(cat2))

        return root

    def getShipList(self, grpid):
        """Get ships for given group id"""
        grp = self.getGroup(grpid,
                            eager=("items", "items.group",
                                   "items.marketGroup"))
        ships = self.getItemsByGroup(grp)
        return ships

    def getShipListDelayed(self, id_, callback):
        """Background version of getShipList"""
        self.shipBrowserWorkerThread.queue.put((id_, callback))

    def searchShips(self, name):
        """Find ships according to given text pattern"""
        filter_ = types_Category.name.in_(["Ship", "Structure"])
        results = eos.db.searchItems(name,
                                     where=filter_,
                                     join=(types_Item.group,
                                           types_Group.category),
                                     eager=("group.category", "metaGroup"))
        ships = set()
        for item in results:
            if self.getPublicityByItem(item):
                ships.add(item)
        return ships

    def searchItems(self, name, callback, filterName=None):
        """Find items according to given text pattern"""
        self.searchWorkerThread.scheduleSearch(name, callback, filterName)

    @staticmethod
    def getItemsWithOverrides():
        overrides = eos.db.getAllOverrides()
        items = set()
        for x in overrides:
            if x.item is None:
                eos.db.saveddata_session.delete(x)
                eos.db.commit()
            else:
                items.add(x.item)
        return list(items)

    @staticmethod
    def directAttrRequest(items, attribs):
        try:
            itemIDs = tuple([i.ID for i in items])
        except TypeError:
            itemIDs = (items.ID, )
        try:
            attrIDs = tuple([i.ID for i in attribs])
        except TypeError:
            attrIDs = (attribs.ID, )
        info = {}
        for itemID, typeID, val in eos.db.directAttributeRequest(
                itemIDs, attrIDs):
            info[itemID] = val

        return info

    def getImplantTree(self):
        """Return implant market group children"""
        img = self.getMarketGroup(27)
        return self.getMarketGroupChildren(img)

    def filterItemsByMeta(self, items, metas):
        """Filter items by meta lvl"""
        filtered = [
            item for item in items if self.getMetaGroupIdByItem(item) in metas
        ]
        return filtered

    def getReplacements(self, identity):
        item = self.getItem(identity)
        # We already store the needed type IDs in the database
        if item.replacements is None:
            replTypeIDs = set()
        else:
            replTypeIDs = {int(i) for i in item.replacements.split(",") if i}
        if not replTypeIDs:
            return ()
        # As replacements were generated without keeping track which items were published,
        # filter them out here
        items = []
        for typeID in replTypeIDs:
            item = self.getItem(typeID)
            if not item:
                continue
            if self.getPublicityByItem(item):
                items.append(item)
        return items

    def getRecentlyUsed(self):
        recentlyUsedItems = []
        # Iterate over a copy so stale IDs can be removed while looping
        for itemID in list(self.serviceMarketRecentlyUsedModules[
                "pyfaMarketRecentlyUsedModules"]):
            item = self.getItem(itemID)
            if item is None:
                self.serviceMarketRecentlyUsedModules[
                    "pyfaMarketRecentlyUsedModules"].remove(itemID)
                continue
            recentlyUsedItems.append(item)
        return recentlyUsedItems

    def storeRecentlyUsed(self, itemID):
        recentlyUsedModules = self.serviceMarketRecentlyUsedModules[
            "pyfaMarketRecentlyUsedModules"]
        while itemID in recentlyUsedModules:
            recentlyUsedModules.remove(itemID)
        item = self.getItem(itemID)
        if item.isAbyssal:
            return
        while len(recentlyUsedModules) >= 20:
            recentlyUsedModules.pop(-1)
        recentlyUsedModules.insert(0, itemID)
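        # e.g. (hypothetical IDs) storing 1, 2, 3 and then 2 again leaves the
        # list as [2, 3, 1]: an existing ID moves to the front instead of
        # being duplicated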

    def itemSort(self, item, reverseMktGrp=False):
        catname = self.getCategoryByItem(item).name
        try:
            mktgrpid = self.getMarketGroupByItem(item).ID
        except AttributeError:
            mktgrpid = -1
            pyfalog.warning("unable to find market group for {}".format(
                item.typeName))
        if reverseMktGrp:
            mktgrpid = -mktgrpid
        parentname = self.getParentItemByItem(item).name
        # Get position of the item's meta group tab
        metagrpid = self.getMetaGroupIdByItem(item)
        metatab = self.META_MAP_REVERSE_GROUPED.get(metagrpid)
        metalvl = item.metaLevel or 0
        return catname, mktgrpid, parentname, metatab, metalvl, item.name
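A short usage sketch of the service above, assuming an initialized pyfa
database; the item name "Rifter" is illustrative only.

market = Market.getInstance()       # lazily constructs the singleton
item = market.getItem("Rifter")     # name lookup goes through conversions
group = market.getGroupByItem(item)          # honours ITEMS_FORCEGROUP
metaId = market.getMetaGroupIdByItem(item)   # falls back to 0 without a meta group
variations = market.getVariationsByItems([item])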
Code example #47
0
File: optimizer.py Project: hologerry/stylegan
class Optimizer:
    """A Wrapper for tf.train.Optimizer.

    Automatically takes care of:
    - Gradient averaging for multi-GPU training.
    - Dynamic loss scaling and typecasts for FP16 training.
    - Ignoring corrupted gradients that contain NaNs/Infs.
    - Reporting statistics.
    - Well-chosen default settings.
    """

    def __init__(self,
                 name: str = "Train",
                 tf_optimizer: str = "tf.train.AdamOptimizer",
                 learning_rate: TfExpressionEx = 0.001,
                 use_loss_scaling: bool = False,
                 loss_scaling_init: float = 64.0,
                 loss_scaling_inc: float = 0.0005,
                 loss_scaling_dec: float = 1.0,
                 **kwargs):

        # Init fields.
        self.name = name
        self.learning_rate = tf.convert_to_tensor(learning_rate)
        self.id = self.name.replace("/", ".")
        self.scope = tf.get_default_graph().unique_name(self.id)
        self.optimizer_class = util.get_obj_by_name(tf_optimizer)
        self.optimizer_kwargs = dict(kwargs)
        self.use_loss_scaling = use_loss_scaling
        self.loss_scaling_init = loss_scaling_init
        self.loss_scaling_inc = loss_scaling_inc
        self.loss_scaling_dec = loss_scaling_dec
        self._grad_shapes = None  # [shape, ...]
        self._dev_opt = OrderedDict()  # device => optimizer
        self._dev_grads = OrderedDict()  # device => [[(grad, var), ...], ...]
        self._dev_ls_var = OrderedDict()  # device => variable (log2 of loss scaling factor)
        self._updates_applied = False

    def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None:
        """Register the gradients of the given loss function with respect to the given variables.
        Intended to be called once per GPU."""
        assert not self._updates_applied

        # Validate arguments.
        if isinstance(trainable_vars, dict):
            trainable_vars = list(trainable_vars.values())  # allow passing in Network.trainables as vars

        assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1
        assert all(tfutil.is_tf_expression(expr) for expr in trainable_vars + [loss])

        if self._grad_shapes is None:
            self._grad_shapes = [tfutil.shape_to_list(var.shape) for var in trainable_vars]

        assert len(trainable_vars) == len(self._grad_shapes)
        assert all(tfutil.shape_to_list(var.shape) == var_shape for var, var_shape in zip(trainable_vars, self._grad_shapes))

        dev = loss.device

        assert all(var.device == dev for var in trainable_vars)

        # Register device and compute gradients.
        with tf.name_scope(self.id + "_grad"), tf.device(dev):
            if dev not in self._dev_opt:
                opt_name = self.scope.replace("/", "_") + "_opt%d" % len(self._dev_opt)
                assert callable(self.optimizer_class)
                self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs)
                self._dev_grads[dev] = []

            loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))
            grads = self._dev_opt[dev].compute_gradients(loss, trainable_vars, gate_gradients=tf.train.Optimizer.GATE_NONE)  # disable gating to reduce memory usage
            grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads]  # replace disconnected gradients with zeros
            self._dev_grads[dev].append(grads)

    def apply_updates(self) -> tf.Operation:
        """Construct training op to update the registered variables based on their gradients."""
        tfutil.assert_tf_initialized()
        assert not self._updates_applied
        self._updates_applied = True
        devices = list(self._dev_grads.keys())
        total_grads = sum(len(grads) for grads in self._dev_grads.values())
        assert len(devices) >= 1 and total_grads >= 1
        ops = []

        with tfutil.absolute_name_scope(self.scope):
            # Cast gradients to FP32 and calculate partial sum within each device.
            dev_grads = OrderedDict()  # device => [(grad, var), ...]

            for dev_idx, dev in enumerate(devices):
                with tf.name_scope("ProcessGrads%d" % dev_idx), tf.device(dev):
                    sums = []

                    for gv in zip(*self._dev_grads[dev]):
                        assert all(v is gv[0][1] for g, v in gv)
                        g = [tf.cast(g, tf.float32) for g, v in gv]
                        g = g[0] if len(g) == 1 else tf.add_n(g)
                        sums.append((g, gv[0][1]))

                    dev_grads[dev] = sums

            # Sum gradients across devices.
            if len(devices) > 1:
                with tf.name_scope("SumAcrossGPUs"), tf.device(None):
                    for var_idx, grad_shape in enumerate(self._grad_shapes):
                        g = [dev_grads[dev][var_idx][0] for dev in devices]

                        if np.prod(grad_shape):  # nccl does not support zero-sized tensors
                            g = nccl_ops.all_sum(g)

                        for dev, gg in zip(devices, g):
                            dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1])

            # Apply updates separately on each device.
            for dev_idx, (dev, grads) in enumerate(dev_grads.items()):
                with tf.name_scope("ApplyGrads%d" % dev_idx), tf.device(dev):
                    # Scale gradients as needed.
                    if self.use_loss_scaling or total_grads > 1:
                        with tf.name_scope("Scale"):
                            coef = tf.constant(np.float32(1.0 / total_grads), name="coef")
                            coef = self.undo_loss_scaling(coef)
                            grads = [(g * coef, v) for g, v in grads]

                    # Check for overflows.
                    with tf.name_scope("CheckOverflow"):
                        grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads]))

                    # Update weights and adjust loss scaling.
                    with tf.name_scope("UpdateWeights"):
                        # pylint: disable=cell-var-from-loop
                        opt = self._dev_opt[dev]
                        ls_var = self.get_loss_scaling_var(dev)

                        if not self.use_loss_scaling:
                            ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op))
                        else:
                            ops.append(tf.cond(grad_ok,
                                               lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)),
                                               lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec))))

                    # Report statistics on the last device.
                    if dev == devices[-1]:
                        with tf.name_scope("Statistics"):
                            ops.append(autosummary.autosummary(self.id + "/learning_rate", self.learning_rate))
                            ops.append(autosummary.autosummary(self.id + "/overflow_frequency", tf.where(grad_ok, 0, 1)))

                            if self.use_loss_scaling:
                                ops.append(autosummary.autosummary(self.id + "/loss_scaling_log2", ls_var))

            # Initialize variables and group everything into a single op.
            self.reset_optimizer_state()
            tfutil.init_uninitialized_vars(list(self._dev_ls_var.values()))

            return tf.group(*ops, name="TrainingOp")

    def reset_optimizer_state(self) -> None:
        """Reset internal state of the underlying optimizer."""
        tfutil.assert_tf_initialized()
        tfutil.run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()])

    def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]:
        """Get or create variable representing log2 of the current dynamic loss scaling factor."""
        if not self.use_loss_scaling:
            return None

        if device not in self._dev_ls_var:
            with tfutil.absolute_name_scope(self.scope + "/LossScalingVars"), tf.control_dependencies(None):
                self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name="loss_scaling_var")

        return self._dev_ls_var[device]

    def apply_loss_scaling(self, value: TfExpression) -> TfExpression:
        """Apply dynamic loss scaling for the given expression."""
        assert tfutil.is_tf_expression(value)

        if not self.use_loss_scaling:
            return value

        return value * tfutil.exp2(self.get_loss_scaling_var(value.device))

    def undo_loss_scaling(self, value: TfExpression) -> TfExpression:
        """Undo the effect of dynamic loss scaling for the given expression."""
        assert tfutil.is_tf_expression(value)

        if not self.use_loss_scaling:
            return value

        return value * tfutil.exp2(-self.get_loss_scaling_var(value.device))  # pylint: disable=invalid-unary-operand-type
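A minimal usage sketch for the wrapper above, assuming a TF1-style graph and
an already-constructed network object exposing trainables, as in the
original codebase; build_loss is a hypothetical helper.

opt = Optimizer(name="TrainG", learning_rate=0.002, use_loss_scaling=True)
with tf.device("/gpu:0"):
    loss = build_loss(network)                   # hypothetical helper
    opt.register_gradients(loss, network.trainables)
train_op = opt.apply_updates()   # builds the op that applies averaged grads
tfutil.run([train_op])           # one training step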
Code example #48
0
File: svhn.py Project: mjwillson/fuel
def convert_svhn_format_1(directory, output_directory,
                          output_filename='svhn_format_1.hdf5'):
    """Converts the SVHN dataset (format 1) to HDF5.

    This method assumes the existence of the files
    `{train,test,extra}.tar.gz`, which are accessible through the
    official website [SVHN].

    .. [SVHN] http://ufldl.stanford.edu/housenumbers/

    Parameters
    ----------
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'svhn_format_1.hdf5'.

    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.

    """
    output_path = os.path.join(output_directory, output_filename)
    h5file = h5py.File(output_path, mode='w')
    TMPDIR = tempfile.mkdtemp()
    try:

        # Every image has three channels (RGB) and variable height and width.
        # It features a variable number of bounding boxes that identify the
        # location and label of digits. The bounding box location is specified
        # using the x and y coordinates of its top left corner along with its
        # width and height.
        BoundingBoxes = namedtuple(
            'BoundingBoxes', ['labels', 'heights', 'widths', 'lefts', 'tops'])
        sources = ('features',) + tuple('bbox_{}'.format(field)
                                        for field in BoundingBoxes._fields)
        source_dtypes = dict([(source, 'uint8') for source in sources[:2]] +
                             [(source, 'uint16') for source in sources[2:]])
        source_axis_labels = {
            'features': ('channel', 'height', 'width'),
            'bbox_labels': ('bounding_box', 'index'),
            'bbox_heights': ('bounding_box', 'height'),
            'bbox_widths': ('bounding_box', 'width'),
            'bbox_lefts': ('bounding_box', 'x'),
            'bbox_tops': ('bounding_box', 'y')}

        # The dataset is split into three sets: the training set, the test set
        # and an extra set of examples that are somewhat less difficult but
        # can be used as extra training data. These sets are stored separately
        # as 'train.tar.gz', 'test.tar.gz' and 'extra.tar.gz'. Each file
        # contains a directory named after the split it stores. The examples
        # are stored in that directory as PNG images. The directory also
        # contains a 'digitStruct.mat' file with all the bounding box and
        # label information.
        splits = ('train', 'test', 'extra')
        file_paths = dict(zip(splits, FORMAT_1_FILES))
        for split, path in file_paths.items():
            file_paths[split] = os.path.join(directory, path)
        digit_struct_paths = dict(
            [(split, os.path.join(TMPDIR, split, 'digitStruct.mat'))
             for split in splits])

        # We first extract the data files in a temporary directory. While
        # doing that, we also count the number of examples for each split.
        # Files are extracted individually, which makes it possible to
        # display a progress bar. Since the splits will be concatenated in
        # the HDF5 file, we also compute the start and stop intervals of
        # each split within the concatenated array.
        def extract_tar(split):
            with tarfile.open(file_paths[split], 'r:gz') as f:
                members = f.getmembers()
                num_examples = sum(1 for m in members if '.png' in m.name)
                progress_bar_context = progress_bar(
                    name='{} file'.format(split), maxval=len(members),
                    prefix='Extracting')
                with progress_bar_context as bar:
                    for i, member in enumerate(members):
                        f.extract(member, path=TMPDIR)
                        bar.update(i)
            return num_examples

        examples_per_split = OrderedDict(
            [(split, extract_tar(split)) for split in splits])
        cumulative_num_examples = numpy.cumsum(
            [0] + list(examples_per_split.values()))
        num_examples = cumulative_num_examples[-1]
        intervals = zip(cumulative_num_examples[:-1],
                        cumulative_num_examples[1:])
        split_intervals = dict(zip(splits, intervals))
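        # e.g. with hypothetical counts 100/50/25 this gives
        # {'train': (0, 100), 'test': (100, 150), 'extra': (150, 175)}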

        # The start and stop indices are used to create a split dict that will
        # be parsed into the split array required by the H5PYDataset interface.
        # The split dict is organized as follows:
        #
        #     dict(split -> dict(source -> (start, stop)))
        #
        split_dict = OrderedDict([
            (split, OrderedDict([(s, split_intervals[split])
                                 for s in sources]))
            for split in splits])
        h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)

        # We then prepare the HDF5 dataset. This involves creating datasets to
        # store data sources and datasets to store auxiliary information
        # (namely the shapes for variable-length axes, and labels to indicate
        # what these variable-length axes represent).
        def make_vlen_dataset(source):
            # Create a variable-length 1D dataset
            dtype = h5py.special_dtype(vlen=numpy.dtype(source_dtypes[source]))
            dataset = h5file.create_dataset(
                source, (num_examples,), dtype=dtype)
            # Create a dataset to store variable-length shapes.
            axis_labels = source_axis_labels[source]
            dataset_shapes = h5file.create_dataset(
                '{}_shapes'.format(source), (num_examples, len(axis_labels)),
                dtype='uint16')
            # Create a dataset to store labels for variable-length axes.
            dataset_vlen_axis_labels = h5file.create_dataset(
                '{}_vlen_axis_labels'.format(source), (len(axis_labels),),
                dtype='S{}'.format(
                    numpy.max([len(label) for label in axis_labels])))
            # Fill variable-length axis labels
            dataset_vlen_axis_labels[...] = [
                label.encode('utf8') for label in axis_labels]
            # Attach auxiliary datasets as dimension scales of the
            # variable-length 1D dataset. This is in accordance with the
            # H5PYDataset interface.
            dataset.dims.create_scale(dataset_shapes, 'shapes')
            dataset.dims[0].attach_scale(dataset_shapes)
            dataset.dims.create_scale(dataset_vlen_axis_labels, 'shape_labels')
            dataset.dims[0].attach_scale(dataset_vlen_axis_labels)
            # Tag fixed-length axis with its label
            dataset.dims[0].label = 'batch'

        for source in sources:
            make_vlen_dataset(source)

        # The "fun" part begins: we extract the bounding box and label
        # information contained in 'digitStruct.mat'. This is a version 7.3
        # Matlab file, which uses HDF5 under the hood, albeit with a very
        # convoluted layout.
        def get_boxes(split):
            boxes = []
            with h5py.File(digit_struct_paths[split], 'r') as f:
                bar_name = '{} digitStruct'.format(split)
                bar_maxval = examples_per_split[split]
                with progress_bar(bar_name, bar_maxval) as bar:
                    for image_number in range(examples_per_split[split]):
                        # The 'digitStruct' group is the main group of the HDF5
                        # file. It contains two datasets: 'bbox' and 'name'.
                        # The 'name' dataset isn't of interest to us, as it
                        # stores file names and there's already a one-to-one
                        # mapping between row numbers and image names (e.g.
                        # row 0 corresponds to '1.png', row 1 corresponds to
                        # '2.png', and so on).
                        main_group = f['digitStruct']
                        # The 'bbox' dataset contains the bounding box and
                        # label information we're after. It has as many rows
                        # as there are images, and one column. Elements of the
                        # 'bbox' dataset are object references that point to
                        # (yet another) group that contains the information
                        # for the corresponding image.
                        image_reference = main_group['bbox'][image_number, 0]

                        # There are five datasets contained in that group:
                        # 'label', 'height', 'width', 'left' and 'top'. Each of
                        # those datasets has as many rows as there are bounding
                        # boxes in the corresponding image, and one column.
                        def get_dataset(name):
                            return main_group[image_reference][name][:, 0]
                        names = ('label', 'height', 'width', 'left', 'top')
                        datasets = dict(
                            [(name, get_dataset(name)) for name in names])

                        # If there is only one bounding box, the information is
                        # stored directly in the datasets. If there are
                        # multiple bounding boxes, elements of those datasets
                        # are object references pointing to 1x1 datasets that
                        # store the information (fortunately, it's the last
                        # hop we need to make).
                        def get_elements(dataset):
                            if len(dataset) > 1:
                                return [int(main_group[reference][0, 0])
                                        for reference in dataset]
                            else:
                                return [int(dataset[0])]
                        # Names are pluralized in the BoundingBoxes named tuple.
                        kwargs = dict(
                            [(name + 's', get_elements(dataset))
                             for name, dataset in iteritems(datasets)])
                        boxes.append(BoundingBoxes(**kwargs))
                        if bar:
                            bar.update(image_number)
            return boxes

        split_boxes = dict([(split, get_boxes(split)) for split in splits])

        # The final step is to fill the HDF5 file.
        def fill_split(split, bar=None):
            for image_number in range(examples_per_split[split]):
                image_path = os.path.join(
                    TMPDIR, split, '{}.png'.format(image_number + 1))
                image = numpy.asarray(
                    Image.open(image_path)).transpose(2, 0, 1)
                bounding_boxes = split_boxes[split][image_number]
                num_boxes = len(bounding_boxes.labels)
                index = image_number + split_intervals[split][0]

                h5file['features'][index] = image.flatten()
                h5file['features'].dims[0]['shapes'][index] = image.shape
                for field in BoundingBoxes._fields:
                    name = 'bbox_{}'.format(field)
                    h5file[name][index] = getattr(bounding_boxes, field)
                    h5file[name].dims[0]['shapes'][index] = [num_boxes, 1]

                # Replace label '10' with '0'.
                labels = h5file['bbox_labels'][index]
                labels[labels == 10] = 0
                h5file['bbox_labels'][index] = labels

                if image_number % 1000 == 0:
                    h5file.flush()
                if bar:
                    bar.update(index)

        with progress_bar('SVHN format 1', num_examples) as bar:
            for split in splits:
                fill_split(split, bar=bar)
    finally:
        if os.path.isdir(TMPDIR):
            shutil.rmtree(TMPDIR)
        h5file.flush()
        h5file.close()

    return (output_path,)
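
A note on the reference-chasing above: MATLAB v7.3 files store nested structures as HDF5 object references, and h5py dereferences them by indexing the file object with the reference. The following standalone sketch (the toy file name and layout are hypothetical, not part of the SVHN script) shows the mechanism in isolation:

import h5py
import numpy as np

# Build a tiny file that mimics digitStruct's layout: a 'bbox'-style
# dataset of object references, each pointing at a per-image group.
ref_dtype = h5py.special_dtype(ref=h5py.Reference)
with h5py.File('toy_refs.h5', 'w') as f:
    grp = f.create_group('item_0')
    grp.create_dataset('label', data=np.array([[3.0]]))
    refs = f.create_dataset('bbox', shape=(1, 1), dtype=ref_dtype)
    refs[0, 0] = grp.ref  # store a reference to the group

with h5py.File('toy_refs.h5', 'r') as f:
    reference = f['bbox'][0, 0]      # an h5py Reference object
    group = f[reference]             # dereference by indexing the file
    print(group['label'][0, 0])      # -> 3.0
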
Code example #49
File: group.py Project: whoiszyc/andes
class GroupBase:
    """
    Base class for groups.
    """
    def __init__(self):
        self.common_params = ['u', 'name']
        self.common_vars = []

        self.models = OrderedDict()  # model name: model instance
        self._idx2model = OrderedDict()  # element idx: model instance
        self.uid = {}  # idx - group internal 0-indexed uid
        self.services_ref = OrderedDict()  # BackRef

    def __setattr__(self, key, value):
        if hasattr(value, 'owner'):
            if value.owner is None:
                value.owner = self
        if hasattr(value, 'name'):
            if value.name is None:
                value.name = key

        if isinstance(value, BackRef):
            self.services_ref[key] = value

        super().__setattr__(key, value)

    @property
    def class_name(self):
        return self.__class__.__name__

    @property
    def n(self):
        """
        Total number of devices.
        """
        return len(self._idx2model)

    def add_model(self, name: str, instance):
        """
        Add a Model instance to group.

        Parameters
        ----------
        name : str
            Model name
        instance : Model
            Model instance

        Returns
        -------
        None
        """
        if name not in self.models:
            self.models[name] = instance
        else:
            raise KeyError(
                f"{self.class_name}: Duplicate model registration of {name}")

    def add(self, idx, model):
        """
        Register a device ``idx`` from a model to this group.

        Parameters
        ----------
        idx: Union[str, float, int]
            idx of the device to register

        model: Model
            Instance of the model that owns the device

        Returns
        -------
        None
        """
        if idx in self._idx2model:
            raise KeyError(
                f'Group <{self.class_name}> already contains <{repr(idx)}> from '
                f'<{self._idx2model[idx].class_name}>')
        self.uid[idx] = self.n
        self._idx2model[idx] = model

    def idx2model(self, idx, allow_none=False):
        """
        Find model name for the given idx.

        Parameters
        ----------
        idx : float, int, str, array-like
            idx or idx-es of devices.
        allow_none : bool
           If True, return `None` at the positions where idx is not found.

        Returns
        -------
        If `idx` is a list, return a list of model instances.
        If `idx` is a single element, return a model instance.
        """

        ret = []
        single = False

        if not isinstance(idx, (list, tuple, np.ndarray)):
            single = True
            idx = (idx, )
        elif len(idx) > 0 and isinstance(idx[0], (list, tuple, np.ndarray)):
            idx = list_flatten(idx)

        for i in idx:
            try:
                if i is None and allow_none:
                    ret.append(None)
                else:
                    ret.append(self._idx2model[i])
            except KeyError:
                raise KeyError(
                    f'Group <{self.class_name}> does not contain device with idx={i}'
                )

        if single:
            ret = ret[0]
        return ret

    def idx2uid(self, idx):
        """
        Convert idx to the 0-indexed unique index.

        Parameters
        ----------
        idx : array-like, numbers, or str
            idx of devices

        Returns
        -------
        list
            A list containing the unique indices of the devices
        """
        if idx is None:
            logger.debug("idx2uid returned None for idx None")
            return None
        if isinstance(idx, (float, int, str, np.int32, np.int64, np.float64)):
            return self.uid[idx]
        elif isinstance(idx, Sized):
            if len(idx) > 0 and isinstance(idx[0], (list, np.ndarray)):
                idx = list_flatten(idx)
            return [self.uid[i] if i is not None else None for i in idx]
        else:
            raise NotImplementedError(f'Unknown idx type {type(idx)}')

    def get(self,
            src: str,
            idx,
            attr: str = 'v',
            allow_none=False,
            default=0.0):
        """
        Based on the indexer, get the `attr` field of the `src` parameter or variable.

        Parameters
        ----------
        src : str
            param or var name
        idx : array-like
            device idx
        attr
            The attribute of the param or var to retrieve
        allow_none : bool
            True to allow None values in the indexer
        default : float
            If `allow_none` is true, the default value to use for None indexer.

        Returns
        -------
        The requested param or variable attribute. If `idx` is a list, return a list of values.
        If `idx` is a single element, return a single value.
        """
        self._check_src(src)
        self._check_idx(idx)

        single = False
        if not isinstance(idx, (list, np.ndarray)):
            idx = [idx]
            single = True

        n = len(idx)
        if n == 0:
            return np.zeros(0)

        ret = [''] * n
        _type_set = False

        models = self.idx2model(idx, allow_none=allow_none)

        for i, idx in enumerate(idx):
            if models[i] is not None:
                uid = models[i].idx2uid(idx)
                instance = models[i].__dict__[src]
                val = instance.__dict__[attr][uid]
            else:
                val = default

            # deduce the type for ret
            if not _type_set:
                if isinstance(val, str):
                    ret = [''] * n
                else:
                    ret = np.zeros(n)
                _type_set = True

            ret[i] = val

        if single:
            ret = ret[0]

        return ret

    def set(self, src: str, idx, attr, value):
        """
        Set the value of an attribute of a group property.
        Performs ``self.<src>.<attr>[idx] = value``.

        The user needs to ensure that the property is shared by all models
        in this group.

        Parameters
        ----------
        src : str
            Name of property.
        idx : str, int, float, array-like
            Indices of devices.
        attr : str
            The internal attribute of the property to set.
            ``v`` for values, ``a`` for address, and ``e`` for equation value.
        value : array-like
            New values to be set

        Returns
        -------
        bool
            True when successful.
        """
        self._check_src(src)
        self._check_idx(idx)

        if not isinstance(idx, (list, np.ndarray)):
            idx = [idx]

        if isinstance(value, (float, str, int)):
            value = [value] * len(idx)

        models = self.idx2model(idx)

        for i, idx in enumerate(idx):
            model = models[i]
            uid = model.idx2uid(idx)
            instance = model.__dict__[src]
            instance.__dict__[attr][uid] = value[i]

        return True

    def find_idx(self, keys, values, allow_none=False, default=None):
        """
        Find indices of devices that satisfy the given `key=value` condition.

        This method iterates over all models in this group.
        """
        indices_found = []
        # `indices_found` contains found indices returned from all models of this group
        for model in self.models.values():
            indices_found.append(
                model.find_idx(keys, values, allow_none=True, default=default))

        out = []
        for idx, idx_found in enumerate(zip(*indices_found)):
            if not allow_none:
                if idx_found.count(None) == len(idx_found):
                    missing_values = [item[idx] for item in values]
                    raise IndexError(
                        f'{list(keys)} = {missing_values} not found in {self.class_name}'
                    )

            real_idx = None
            for item in idx_found:
                if item is not None:
                    real_idx = item
                    break
            out.append(real_idx)
        return out

    def _check_src(self, src: str):
        if src not in self.common_vars + self.common_params:
            # raise AttributeError(f'Group <{self.class_name}> does not share property <{src}>.')
            logger.debug(
                f'Group <{self.class_name}> does not share property <{src}>.')

    def _check_idx(self, idx):
        if idx is None:
            raise IndexError(f'{self.class_name}: idx cannot be None')

    def get_next_idx(self, idx=None, model_name=None):
        """
        Get a no-conflict idx for a new device.
        Use the provided ``idx`` if no conflict.
        Generate a new one otherwise.

        Parameters
        ----------
        idx : str or None
            Proposed idx. If None, assign a new one.
        model_name : str or None
            Model name. If None, use the group name as the prefix.

        Returns
        -------
        str
            New device idx, guaranteed conflict-free.

        """
        if model_name is None:
            model_name = self.class_name

        need_new = False

        if idx is not None:
            if idx not in self._idx2model:
                # name is good
                pass
            else:
                logger.warning(
                    f"Group {self.class_name}: idx={idx} is used by {self.idx2model(idx).class_name}. "
                    f"Data may be inconsistent.")
                need_new = True
        else:
            need_new = True

        if need_new is True:
            count = self.n
            while True:
                # IMPORTANT: automatically assigned index is 1-indexed. Namely, `GENCLS_1` is the first generator.
                # This is because when we say, for example, `GENCLS_10`, people usually assume it starts at 1.
                idx = model_name + '_' + str(count + 1)
                if idx not in self._idx2model:
                    break
                else:
                    count += 1

        return idx

    def doc(self, export='plain'):
        """
        Return the documentation of the group in a string.
        """
        out = ''
        if export == 'rest':
            out += f'.. _{self.class_name}:\n\n'
            group_header = '================================================================================\n'
        else:
            group_header = ''

        if export == 'rest':
            out += group_header + f'{self.class_name}\n' + group_header
        else:
            out += group_header + f'Group <{self.class_name}>\n' + group_header

        if self.__doc__:
            out += str(self.__doc__) + '\n\n'

        if len(self.common_params):
            out += 'Common Parameters: ' + ', '.join(self.common_params)
            out += '\n\n'
        if len(self.common_vars):
            out += 'Common Variables: ' + ', '.join(self.common_vars)
            out += '\n\n'
        if len(self.models):
            out += 'Available models:\n'
            model_name_list = list(self.models.keys())

            if export == 'rest':

                def add_reference(name_list):
                    return [f'{item}_' for item in name_list]

                model_name_list = add_reference(model_name_list)

            out += ',\n'.join(model_name_list) + '\n'

        return out

    def doc_all(self, export='plain'):
        """
        Return documentation of the group and its models.

        Parameters
        ----------
        export : 'plain' or 'rest'
            Export format, plain-text or RestructuredText

        Returns
        -------
        str

        """
        out = self.doc(export=export)
        out += '\n'
        for instance in self.models.values():
            out += instance.doc(export=export)
            out += '\n'
        return out
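
As a rough usage sketch of the registries above, assuming GroupBase and its module-level imports (OrderedDict, Sized, list_flatten) are in scope; the stubs below replace andes' real BackRef, logger, and Model, so everything outside GroupBase here is hypothetical:

import logging
import numpy as np

logger = logging.getLogger(__name__)

class BackRef:       # stand-in for andes' BackRef service
    pass

class GENCLS:        # stand-in for a real andes Model
    @property
    def class_name(self):
        return self.__class__.__name__

group = GroupBase()
gen = GENCLS()
group.add_model('GENCLS', gen)

# Register devices: idx -> owning model instance.
group.add('GENCLS_1', gen)
group.add('GENCLS_2', gen)

print(group.n)                      # 2 devices in the group
print(group.idx2uid('GENCLS_2'))    # 1 (0-indexed uid)
print(group.idx2model('GENCLS_1'))  # the GENCLS instance
print(group.get_next_idx())         # 'GroupBase_3' (first conflict-free idx)
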
Code example #50
File: space.py Project: fangwudi/tvm
class ConfigSpace(object):
    """The configuration space of a schedule. Pass it as config in template to
       collect transformation space and build transform graph of axes
    """
    def __init__(self):
        # private dict to provide sugar
        self.space_map = OrderedDict()    # name -> space
        self._collect = True
        self._length = None
        self._entity_map = OrderedDict()  # name -> entity
        self._constraints = []
        self.errors = []
        self.template_key = None
        self.code_hash = None
        self.flop = 0
        self.is_fallback = False

    @staticmethod
    def axis(var):
        """get a virtual axis (axis placeholder)

        Parameters
        ----------
        var: int or tvm.schedule.IterVar
            If is int, return an axis whose length is the provided argument.
            If is IterVar, return an axis whose length is extracted from the
                           IterVar's extent domain.
        """
        return VirtualAxis(var)

    reduce_axis = axis

    def define_split(self, name, axis, policy='factors', **kwargs):
        """Define a new tunable knob which splits an axis into a list of axes

        Parameters
        ----------
        name: str
            name to index the entity of this space
        axis: tvm.schedule.IterVar
            axis to split
        policy: str
            name of policy.
            If is 'factors', the tuner will try all divisible factors.
            If is 'power2', the tuner will try power-of-two factors less than or equal to the length.
            If is 'verbose', the tuner will try all candidates in the above two policies.
            If is 'candidate', try given candidates.
        kwargs: dict
            extra arguments for policy
            max_factor: int
                the maximum split factor.
            filter: function(int) -> bool
                see examples below for how to use filter.
            num_outputs: int
                the total number of axes after the split.
            no_tail: bool
                should we only include divisible numbers as split factors.
            candidate: list
                (policy=candidate) manual candidate list.

        Examples
        --------
        >>> # use custom candidates
        >>> cfg.define_split('tile_x', x, policy='candidate', candidate=[[1, 4, 4], [4, 1, 4]])

        >>> # use a filter that only accepts split schemes whose innermost tile is at most 4
        >>> cfg.define_split('tile_y', y, policy='factors', filter=lambda x: x.size[-1] <= 4)
        """
        axes = [axis]
        return self._add_new_transform(SplitSpace, name, axes, policy, **kwargs)

    def define_reorder(self, name, axes, policy, **kwargs):
        """Define a new tunable knob which reorders a list of axes

        Parameters
        ----------
        name: str
            name to index the entity of this space
        axes: Array of tvm.schedule.IterVar
            axes to reorder
        policy: str
            name of policy
            If is 'identity', do an identity permutation.
            If is 'all', try all permutations.
            If is 'interval_all', try all permutations of an interval of axes.
            If is 'candidate', try listed candidate.
            If is 'interleave', interleave chains of spatial axes and chains of reduction axes.
        kwargs: dict
            extra arguments for policy
        """
        return self._add_new_transform(ReorderSpace, name, axes, policy, **kwargs)

    def define_annotate(self, name, axes, policy, **kwargs):
        """Define a new tunable knob which annotates a list of axes

        Parameters
        ----------
        name: str
            name to index the entity of this space
        axes: Array of tvm.schedule.IterVar
            axes to annotate
        policy: str
            name of policy
            If is 'unroll', unroll the axes.
            If is 'try_unroll', try to unroll the axes.
            If is 'try_unroll_vec', try to unroll or vectorize the axes.
            If is 'bind_gpu', bind the first few axes to gpu threads.
            If is 'locate_cache', choose n axes to attach shared/local cache.
        kwargs: dict
            extra arguments for policy
        """
        return self._add_new_transform(AnnotateSpace, name, axes, policy, **kwargs)

    def define_knob(self, name, candidate):
        """Define a tunable knob with a list of candidates

        Parameters
        ----------
        name: str
            name key of that option
        candidate: list
            list of candidates
        """
        return self._add_new_transform(OtherOptionSpace, name, [], None, candidate=candidate)

    def add_flop(self, flop):
        """Add float operation statistics for this tuning task

        Parameters
        ---------
        flop: int or float
            number of float operations
        """
        self.flop += flop

    def raise_error(self, msg):
        """register error in config
        Using this to actively detect error when scheudling.
        Otherwise these error will occur during runtime, which
        will cost more time.

        Parameters
        ----------
        msg: str
        """
        self.errors.append(msg)

    def valid(self):
        """Check whether the config meets all the constraints
        Note: This check should be called after instantiation of task,
              because the ConfigEntity/ConfigSpace collects errors during instantiation

        Returns
        -------
        valid: bool
            whether the config meets all the constraints
        """
        return not bool(self.errors)

    def _add_new_transform(self, space_class, name, axes, policy, **kwargs):
        """Add a new transform space in template"""
        if self._collect:
            # convert schedule axis to space definition axis
            axes = [x if isinstance(x, (VirtualAxis, Axis)) else self.axis(x) for x in axes]

            # add subspace (knob)
            space = space_class(axes, policy, **kwargs)
            self.space_map[name] = space
            self._entity_map[name] = space[0]
            return [Axis(space, i) for i in range(space.num_output)]
        return [Axis(None, i) for i in range(space_class.get_num_output(axes, policy, **kwargs))]

    def __len__(self):
        if self._length is None:
            self._length = int(np.prod([len(x) for x in self.space_map.values()]))
        return self._length

    def get(self, index):
        """Get a config entity with detailed parameters from this space

        Parameters
        ----------
        index: int
            index in the space
        """
        entities = OrderedDict()
        t = index
        for name, space in self.space_map.items():
            entities[name] = space[t % len(space)]
            t //= len(space)
        ret = ConfigEntity(index, self.code_hash, self.template_key, entities, self._constraints)
        return ret

    def __iter__(self):
        return self._entity_map.__iter__()

    def __getitem__(self, name):
        """get the transform entity(knob) of this entity by name
           do not use this to get a ConfigEntity of this space (should use ConfigSpace.get instead)

        Parameters
        ----------
        name: str
            name of the transform
        """
        return self._entity_map[name]

    def __repr__(self):
        res = "ConfigSpace (len=%d, space_map=\n" % len(self)
        for i, (name, space) in enumerate(self.space_map.items()):
            res += "  %2d %s: %s\n" % (i, name, space)
        return res + ")"
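
The decoding in get() above is a mixed-radix conversion: each subspace consumes one "digit" of the flat index via t % len(space) and t //= len(space). A self-contained sketch of the same idea with plain lists (independent of TVM; the knob names are made up):

from collections import OrderedDict

# Hypothetical subspaces, each a list of candidate values.
space_map = OrderedDict([
    ('tile_x', [[1, 16], [2, 8], [4, 4]]),   # 3 choices
    ('unroll', [0, 1]),                      # 2 choices
])

def decode(index):
    """Map a flat index to one choice per subspace (mixed radix)."""
    entities = OrderedDict()
    t = index
    for name, space in space_map.items():
        entities[name] = space[t % len(space)]
        t //= len(space)
    return entities

total = 1
for space in space_map.values():
    total *= len(space)     # 6, matching ConfigSpace.__len__

for i in range(total):
    print(i, dict(decode(i)))
# 0 {'tile_x': [1, 16], 'unroll': 0} ... 5 {'tile_x': [4, 4], 'unroll': 1}
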
Code example #51
File: state.py Project: theassyrian/discord.py
class ConnectionState:
    def __init__(self, *, dispatch, chunker, handlers, syncer, http, loop, **options):
        self.loop = loop
        self.http = http
        self.max_messages = options.get('max_messages', 1000)
        if self.max_messages is not None and self.max_messages <= 0:
            self.max_messages = 1000

        self.dispatch = dispatch
        self.chunker = chunker
        self.syncer = syncer
        self.is_bot = None
        self.handlers = handlers
        self.shard_count = None
        self._ready_task = None
        self._fetch_offline = options.get('fetch_offline_members', True)
        self.heartbeat_timeout = options.get('heartbeat_timeout', 60.0)
        self.guild_subscriptions = options.get('guild_subscriptions', True)
        allowed_mentions = options.get('allowed_mentions')

        if allowed_mentions is not None and not isinstance(allowed_mentions, AllowedMentions):
            raise TypeError('allowed_mentions parameter must be AllowedMentions')

        self.allowed_mentions = allowed_mentions
        # Only disable cache if both fetch_offline and guild_subscriptions are off.
        self._cache_members = (self._fetch_offline or self.guild_subscriptions)
        self._listeners = []

        activity = options.get('activity', None)
        if activity:
            if not isinstance(activity, BaseActivity):
                raise TypeError('activity parameter must derive from BaseActivity.')

            activity = activity.to_dict()

        status = options.get('status', None)
        if status:
            if status is Status.offline:
                status = 'invisible'
            else:
                status = str(status)

        self._activity = activity
        self._status = status

        self.parsers = parsers = {}
        for attr, func in inspect.getmembers(self):
            if attr.startswith('parse_'):
                parsers[attr[6:].upper()] = func

        self.clear()

    def clear(self):
        self.user = None
        self._users = weakref.WeakValueDictionary()
        self._emojis = {}
        self._calls = {}
        self._guilds = {}
        self._voice_clients = {}

        # LRU of max size 128
        self._private_channels = OrderedDict()
        # extra dict to look up private channels by user id
        self._private_channels_by_user = {}
        self._messages = self.max_messages and deque(maxlen=self.max_messages)

        # In cases of large deallocations the GC should be called explicitly
        # To free the memory more immediately, especially true when it comes
        # to reconnect loops which cause mass allocations and deallocations.
        gc.collect()

    def process_listeners(self, listener_type, argument, result):
        removed = []
        for i, listener in enumerate(self._listeners):
            if listener.type != listener_type:
                continue

            future = listener.future
            if future.cancelled():
                removed.append(i)
                continue

            try:
                passed = listener.predicate(argument)
            except Exception as exc:
                future.set_exception(exc)
                removed.append(i)
            else:
                if passed:
                    future.set_result(result)
                    removed.append(i)
                    if listener.type == ListenerType.chunk:
                        break

        for index in reversed(removed):
            del self._listeners[index]

    def call_handlers(self, key, *args, **kwargs):
        try:
            func = self.handlers[key]
        except KeyError:
            pass
        else:
            func(*args, **kwargs)

    @property
    def self_id(self):
        u = self.user
        return u.id if u else None

    @property
    def voice_clients(self):
        return list(self._voice_clients.values())

    def _get_voice_client(self, guild_id):
        return self._voice_clients.get(guild_id)

    def _add_voice_client(self, guild_id, voice):
        self._voice_clients[guild_id] = voice

    def _remove_voice_client(self, guild_id):
        self._voice_clients.pop(guild_id, None)

    def _update_references(self, ws):
        for vc in self.voice_clients:
            vc.main_ws = ws

    def store_user(self, data):
        # this way is 300% faster than `dict.setdefault`.
        user_id = int(data['id'])
        try:
            return self._users[user_id]
        except KeyError:
            user = User(state=self, data=data)
            if user.discriminator != '0000':
                self._users[user_id] = user
            return user

    def get_user(self, id):
        return self._users.get(id)

    def store_emoji(self, guild, data):
        emoji_id = int(data['id'])
        self._emojis[emoji_id] = emoji = Emoji(guild=guild, state=self, data=data)
        return emoji

    @property
    def guilds(self):
        return list(self._guilds.values())

    def _get_guild(self, guild_id):
        return self._guilds.get(guild_id)

    def _add_guild(self, guild):
        self._guilds[guild.id] = guild

    def _remove_guild(self, guild):
        self._guilds.pop(guild.id, None)

        for emoji in guild.emojis:
            self._emojis.pop(emoji.id, None)

        del guild

        # Much like clear(), if we have a massive deallocation
        # then it's better to explicitly call the GC
        gc.collect()

    @property
    def emojis(self):
        return list(self._emojis.values())

    def get_emoji(self, emoji_id):
        return self._emojis.get(emoji_id)

    @property
    def private_channels(self):
        return list(self._private_channels.values())

    def _get_private_channel(self, channel_id):
        try:
            value = self._private_channels[channel_id]
        except KeyError:
            return None
        else:
            self._private_channels.move_to_end(channel_id)
            return value

    def _get_private_channel_by_user(self, user_id):
        return self._private_channels_by_user.get(user_id)

    def _add_private_channel(self, channel):
        channel_id = channel.id
        self._private_channels[channel_id] = channel

        if self.is_bot and len(self._private_channels) > 128:
            _, to_remove = self._private_channels.popitem(last=False)
            if isinstance(to_remove, DMChannel):
                self._private_channels_by_user.pop(to_remove.recipient.id, None)

        if isinstance(channel, DMChannel):
            self._private_channels_by_user[channel.recipient.id] = channel

    def add_dm_channel(self, data):
        channel = DMChannel(me=self.user, state=self, data=data)
        self._add_private_channel(channel)
        return channel

    def _remove_private_channel(self, channel):
        self._private_channels.pop(channel.id, None)
        if isinstance(channel, DMChannel):
            self._private_channels_by_user.pop(channel.recipient.id, None)

    def _get_message(self, msg_id):
        return utils.find(lambda m: m.id == msg_id, reversed(self._messages)) if self._messages else None

    def _add_guild_from_data(self, guild):
        guild = Guild(data=guild, state=self)
        self._add_guild(guild)
        return guild

    def chunks_needed(self, guild):
        for _ in range(math.ceil(guild._member_count / 1000)):
            yield self.receive_chunk(guild.id)

    def _get_guild_channel(self, data):
        channel_id = int(data['channel_id'])
        try:
            guild = self._get_guild(int(data['guild_id']))
        except KeyError:
            channel = self.get_channel(channel_id)
            guild = None
        else:
            channel = guild and guild.get_channel(channel_id)

        return channel or Object(id=channel_id), guild

    async def request_offline_members(self, guilds):
        # get all the chunks
        chunks = []
        for guild in guilds:
            chunks.extend(self.chunks_needed(guild))

        # we only want to request ~75 guilds per chunk request.
        splits = [guilds[i:i + 75] for i in range(0, len(guilds), 75)]
        for split in splits:
            await self.chunker(split)

        # wait for the chunks
        if chunks:
            try:
                await utils.sane_wait_for(chunks, timeout=len(chunks) * 30.0)
            except asyncio.TimeoutError:
                log.warning('Somehow timed out waiting for chunks.')
            else:
                log.info('Finished requesting guild member chunks for %d guilds.', len(guilds))

    async def query_members(self, guild, query, limit, cache):
        guild_id = guild.id
        ws = self._get_websocket(guild_id)
        if ws is None:
            raise RuntimeError('Somehow do not have a websocket for this guild_id')

        # Limits over 1000 cannot be supported since
        # the main use case for this is guild_subscriptions being disabled,
        # in which case GUILD_MEMBER events are not received and computing
        # member_count is impossible. The only way to fix it is by limiting
        # the limit parameter to the range 1 to 1000.
        future = self.receive_member_query(guild_id, query)
        try:
            # start the query operation
            await ws.request_chunks(guild_id, query, limit)
            members = await asyncio.wait_for(future, timeout=5.0)

            if cache:
                for member in members:
                    guild._add_member(member)

            return members
        except asyncio.TimeoutError:
            log.warning('Timed out waiting for chunks with query %r and limit %d for guild_id %d', query, limit, guild_id)
            raise

    async def _delay_ready(self):
        try:
            launch = self._ready_state.launch

            # only real bots wait for GUILD_CREATE streaming
            if self.is_bot:
                while True:
                    # this snippet of code is basically waiting 2 seconds
                    # until the last GUILD_CREATE was sent
                    try:
                        await asyncio.wait_for(launch.wait(), timeout=2.0)
                    except asyncio.TimeoutError:
                        break
                    else:
                        launch.clear()

            guilds = next(zip(*self._ready_state.guilds), [])
            if self._fetch_offline:
                await self.request_offline_members(guilds)

            for guild, unavailable in self._ready_state.guilds:
                if unavailable is False:
                    self.dispatch('guild_available', guild)
                else:
                    self.dispatch('guild_join', guild)

            # remove the state
            try:
                del self._ready_state
            except AttributeError:
                pass # already been deleted somehow

            # call GUILD_SYNC after we're done chunking
            if not self.is_bot:
                log.info('Requesting GUILD_SYNC for %s guilds', len(self.guilds))
                await self.syncer([s.id for s in self.guilds])
        except asyncio.CancelledError:
            pass
        else:
            # dispatch the event
            self.call_handlers('ready')
            self.dispatch('ready')
        finally:
            self._ready_task = None

    def parse_ready(self, data):
        if self._ready_task is not None:
            self._ready_task.cancel()

        self._ready_state = ReadyState(launch=asyncio.Event(), guilds=[])
        self.clear()
        self.user = user = ClientUser(state=self, data=data['user'])
        self._users[user.id] = user

        guilds = self._ready_state.guilds
        for guild_data in data['guilds']:
            guild = self._add_guild_from_data(guild_data)
            if (not self.is_bot and not guild.unavailable) or guild.large:
                guilds.append((guild, guild.unavailable))

        for relationship in data.get('relationships', []):
            try:
                r_id = int(relationship['id'])
            except KeyError:
                continue
            else:
                user._relationships[r_id] = Relationship(state=self, data=relationship)

        for pm in data.get('private_channels', []):
            factory, _ = _channel_factory(pm['type'])
            self._add_private_channel(factory(me=user, data=pm, state=self))

        self.dispatch('connect')
        self._ready_task = asyncio.ensure_future(self._delay_ready(), loop=self.loop)

    def parse_resumed(self, data):
        self.dispatch('resumed')

    def parse_message_create(self, data):
        channel, _ = self._get_guild_channel(data)
        message = Message(channel=channel, data=data, state=self)
        self.dispatch('message', message)
        if self._messages is not None:
            self._messages.append(message)
        if channel and channel.__class__ is TextChannel:
            channel.last_message_id = message.id

    def parse_message_delete(self, data):
        raw = RawMessageDeleteEvent(data)
        found = self._get_message(raw.message_id)
        raw.cached_message = found
        self.dispatch('raw_message_delete', raw)
        if self._messages is not None and found is not None:
            self.dispatch('message_delete', found)
            self._messages.remove(found)

    def parse_message_delete_bulk(self, data):
        raw = RawBulkMessageDeleteEvent(data)
        if self._messages:
            found_messages = [message for message in self._messages if message.id in raw.message_ids]
        else:
            found_messages = []
        raw.cached_messages = found_messages
        self.dispatch('raw_bulk_message_delete', raw)
        if found_messages:
            self.dispatch('bulk_message_delete', found_messages)
            for msg in found_messages:
                self._messages.remove(msg)

    def parse_message_update(self, data):
        raw = RawMessageUpdateEvent(data)
        message = self._get_message(raw.message_id)
        if message is not None:
            older_message = copy.copy(message)
            raw.cached_message = older_message
            self.dispatch('raw_message_edit', raw)
            message._update(data)
            self.dispatch('message_edit', older_message, message)
        else:
            self.dispatch('raw_message_edit', raw)

    def parse_message_reaction_add(self, data):
        emoji = data['emoji']
        emoji_id = utils._get_as_snowflake(emoji, 'id')
        emoji = PartialEmoji.with_state(self, id=emoji_id, animated=emoji.get('animated', False), name=emoji['name'])
        raw = RawReactionActionEvent(data, emoji, 'REACTION_ADD')

        member_data = data.get('member')
        if member_data:
            guild = self._get_guild(raw.guild_id)
            raw.member = Member(data=member_data, guild=guild, state=self)
        else:
            raw.member = None
        self.dispatch('raw_reaction_add', raw)

        # rich interface here
        message = self._get_message(raw.message_id)
        if message is not None:
            emoji = self._upgrade_partial_emoji(emoji)
            reaction = message._add_reaction(data, emoji, raw.user_id)
            user = raw.member or self._get_reaction_user(message.channel, raw.user_id)

            if user:
                self.dispatch('reaction_add', reaction, user)

    def parse_message_reaction_remove_all(self, data):
        raw = RawReactionClearEvent(data)
        self.dispatch('raw_reaction_clear', raw)

        message = self._get_message(raw.message_id)
        if message is not None:
            old_reactions = message.reactions.copy()
            message.reactions.clear()
            self.dispatch('reaction_clear', message, old_reactions)

    def parse_message_reaction_remove(self, data):
        emoji = data['emoji']
        emoji_id = utils._get_as_snowflake(emoji, 'id')
        emoji = PartialEmoji.with_state(self, id=emoji_id, name=emoji['name'])
        raw = RawReactionActionEvent(data, emoji, 'REACTION_REMOVE')
        self.dispatch('raw_reaction_remove', raw)

        message = self._get_message(raw.message_id)
        if message is not None:
            emoji = self._upgrade_partial_emoji(emoji)
            try:
                reaction = message._remove_reaction(data, emoji, raw.user_id)
            except (AttributeError, ValueError): # eventual consistency lol
                pass
            else:
                user = self._get_reaction_user(message.channel, raw.user_id)
                if user:
                    self.dispatch('reaction_remove', reaction, user)

    def parse_message_reaction_remove_emoji(self, data):
        emoji = data['emoji']
        emoji_id = utils._get_as_snowflake(emoji, 'id')
        emoji = PartialEmoji.with_state(self, id=emoji_id, name=emoji['name'])
        raw = RawReactionClearEmojiEvent(data, emoji)
        self.dispatch('raw_reaction_clear_emoji', raw)

        message = self._get_message(raw.message_id)
        if message is not None:
            try:
                reaction = message._clear_emoji(emoji)
            except (AttributeError, ValueError): # eventual consistency lol
                pass
            else:
                if reaction:
                    self.dispatch('reaction_clear_emoji', reaction)

    def parse_presence_update(self, data):
        guild_id = utils._get_as_snowflake(data, 'guild_id')
        guild = self._get_guild(guild_id)
        if guild is None:
            log.warning('PRESENCE_UPDATE referencing an unknown guild ID: %s. Discarding.', guild_id)
            return

        user = data['user']
        member_id = int(user['id'])
        member = guild.get_member(member_id)
        if member is None:
            if 'username' not in user:
                # sometimes we receive 'incomplete' member data post-removal.
                # skip these useless cases.
                return

            member, old_member = Member._from_presence_update(guild=guild, data=data, state=self)
            guild._add_member(member)
        else:
            old_member = Member._copy(member)
            user_update = member._presence_update(data=data, user=user)
            if user_update:
                self.dispatch('user_update', user_update[0], user_update[1])

        self.dispatch('member_update', old_member, member)

    def parse_user_update(self, data):
        self.user._update(data)

    def parse_invite_create(self, data):
        invite = Invite.from_gateway(state=self, data=data)
        self.dispatch('invite_create', invite)

    def parse_invite_delete(self, data):
        invite = Invite.from_gateway(state=self, data=data)
        self.dispatch('invite_delete', invite)

    def parse_channel_delete(self, data):
        guild = self._get_guild(utils._get_as_snowflake(data, 'guild_id'))
        channel_id = int(data['id'])
        if guild is not None:
            channel = guild.get_channel(channel_id)
            if channel is not None:
                guild._remove_channel(channel)
                self.dispatch('guild_channel_delete', channel)
        else:
            # the reason we're doing this is so it's also removed from the
            # private channel by user cache as well
            channel = self._get_private_channel(channel_id)
            if channel is not None:
                self._remove_private_channel(channel)
                self.dispatch('private_channel_delete', channel)

    def parse_channel_update(self, data):
        channel_type = try_enum(ChannelType, data.get('type'))
        channel_id = int(data['id'])
        if channel_type is ChannelType.group:
            channel = self._get_private_channel(channel_id)
            old_channel = copy.copy(channel)
            channel._update_group(data)
            self.dispatch('private_channel_update', old_channel, channel)
            return

        guild_id = utils._get_as_snowflake(data, 'guild_id')
        guild = self._get_guild(guild_id)
        if guild is not None:
            channel = guild.get_channel(channel_id)
            if channel is not None:
                old_channel = copy.copy(channel)
                channel._update(guild, data)
                self.dispatch('guild_channel_update', old_channel, channel)
            else:
                log.warning('CHANNEL_UPDATE referencing an unknown channel ID: %s. Discarding.', channel_id)
        else:
            log.warning('CHANNEL_UPDATE referencing an unknown guild ID: %s. Discarding.', guild_id)

    def parse_channel_create(self, data):
        factory, ch_type = _channel_factory(data['type'])
        if factory is None:
            log.warning('CHANNEL_CREATE referencing an unknown channel type %s. Discarding.', data['type'])
            return

        channel = None

        if ch_type in (ChannelType.group, ChannelType.private):
            channel_id = int(data['id'])
            if self._get_private_channel(channel_id) is None:
                channel = factory(me=self.user, data=data, state=self)
                self._add_private_channel(channel)
                self.dispatch('private_channel_create', channel)
        else:
            guild_id = utils._get_as_snowflake(data, 'guild_id')
            guild = self._get_guild(guild_id)
            if guild is not None:
                channel = factory(guild=guild, state=self, data=data)
                guild._add_channel(channel)
                self.dispatch('guild_channel_create', channel)
            else:
                log.warning('CHANNEL_CREATE referencing an unknown guild ID: %s. Discarding.', guild_id)
                return

    def parse_channel_pins_update(self, data):
        channel_id = int(data['channel_id'])
        channel = self.get_channel(channel_id)
        if channel is None:
            log.warning('CHANNEL_PINS_UPDATE referencing an unknown channel ID: %s. Discarding.', channel_id)
            return

        last_pin = utils.parse_time(data['last_pin_timestamp']) if data['last_pin_timestamp'] else None

        try:
            # I have not imported discord.abc in this file
            # the isinstance check is also 2x slower than just checking this attribute
            # so we're just gonna check it since it's easier and faster and lazier
            channel.guild
        except AttributeError:
            self.dispatch('private_channel_pins_update', channel, last_pin)
        else:
            self.dispatch('guild_channel_pins_update', channel, last_pin)

    def parse_channel_recipient_add(self, data):
        channel = self._get_private_channel(int(data['channel_id']))
        user = self.store_user(data['user'])
        channel.recipients.append(user)
        self.dispatch('group_join', channel, user)

    def parse_channel_recipient_remove(self, data):
        channel = self._get_private_channel(int(data['channel_id']))
        user = self.store_user(data['user'])
        try:
            channel.recipients.remove(user)
        except ValueError:
            pass
        else:
            self.dispatch('group_remove', channel, user)

    def parse_guild_member_add(self, data):
        guild = self._get_guild(int(data['guild_id']))
        if guild is None:
            log.warning('GUILD_MEMBER_ADD referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
            return

        member = Member(guild=guild, data=data, state=self)
        if self._cache_members:
            guild._add_member(member)
        guild._member_count += 1
        self.dispatch('member_join', member)

    def parse_guild_member_remove(self, data):
        guild = self._get_guild(int(data['guild_id']))
        if guild is not None:
            user_id = int(data['user']['id'])
            member = guild.get_member(user_id)
            if member is not None:
                guild._remove_member(member)
                guild._member_count -= 1
                self.dispatch('member_remove', member)
        else:
            log.warning('GUILD_MEMBER_REMOVE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])

    def parse_guild_member_update(self, data):
        guild = self._get_guild(int(data['guild_id']))
        user = data['user']
        user_id = int(user['id'])
        if guild is None:
            log.warning('GUILD_MEMBER_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
            return

        member = guild.get_member(user_id)
        if member is not None:
            old_member = copy.copy(member)
            member._update(data)
            self.dispatch('member_update', old_member, member)
        else:
            log.warning('GUILD_MEMBER_UPDATE referencing an unknown member ID: %s. Discarding.', user_id)

    def parse_guild_emojis_update(self, data):
        guild = self._get_guild(int(data['guild_id']))
        if guild is None:
            log.warning('GUILD_EMOJIS_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
            return

        before_emojis = guild.emojis
        for emoji in before_emojis:
            self._emojis.pop(emoji.id, None)
        guild.emojis = tuple(map(lambda d: self.store_emoji(guild, d), data['emojis']))
        self.dispatch('guild_emojis_update', guild, before_emojis, guild.emojis)

    def _get_create_guild(self, data):
        if data.get('unavailable') is False:
            # GUILD_CREATE with unavailable in the response
            # usually means that the guild has become available
            # and is therefore in the cache
            guild = self._get_guild(int(data['id']))
            if guild is not None:
                guild.unavailable = False
                guild._from_data(data)
                return guild

        return self._add_guild_from_data(data)

    async def _chunk_and_dispatch(self, guild, unavailable):
        chunks = list(self.chunks_needed(guild))
        await self.chunker(guild)
        if chunks:
            try:
                await utils.sane_wait_for(chunks, timeout=len(chunks))
            except asyncio.TimeoutError:
                log.info('Somehow timed out waiting for chunks.')

        if unavailable is False:
            self.dispatch('guild_available', guild)
        else:
            self.dispatch('guild_join', guild)

    def parse_guild_create(self, data):
        unavailable = data.get('unavailable')
        if unavailable is True:
            # joined a guild with unavailable == True so..
            return

        guild = self._get_create_guild(data)

        # check if it requires chunking
        if guild.large:
            if unavailable is False:
                # check if we're waiting for 'useful' READY
                # and if we are, we don't want to dispatch any
                # event such as guild_join or guild_available
                # because we're still in the 'READY' phase. Or
                # so we say.
                try:
                    state = self._ready_state
                    state.launch.set()
                    state.guilds.append((guild, unavailable))
                except AttributeError:
                    # the _ready_state attribute is only there during
                    # processing of useful READY.
                    pass
                else:
                    return

            # since we're not waiting for 'useful' READY we'll just
            # do the chunk request here if wanted
            if self._fetch_offline:
                asyncio.ensure_future(self._chunk_and_dispatch(guild, unavailable), loop=self.loop)
                return

        # Dispatch available if newly available
        if unavailable is False:
            self.dispatch('guild_available', guild)
        else:
            self.dispatch('guild_join', guild)

    def parse_guild_sync(self, data):
        guild = self._get_guild(int(data['id']))
        guild._sync(data)

    def parse_guild_update(self, data):
        guild = self._get_guild(int(data['id']))
        if guild is not None:
            old_guild = copy.copy(guild)
            guild._from_data(data)
            self.dispatch('guild_update', old_guild, guild)
        else:
            log.warning('GUILD_UPDATE referencing an unknown guild ID: %s. Discarding.', data['id'])

    def parse_guild_delete(self, data):
        guild = self._get_guild(int(data['id']))
        if guild is None:
            log.warning('GUILD_DELETE referencing an unknown guild ID: %s. Discarding.', data['id'])
            return

        if data.get('unavailable', False) and guild is not None:
            # GUILD_DELETE with unavailable being True means that the
            # guild that was available is now currently unavailable
            guild.unavailable = True
            self.dispatch('guild_unavailable', guild)
            return

        # do a cleanup of the messages cache
        if self._messages is not None:
            self._messages = deque((msg for msg in self._messages if msg.guild != guild), maxlen=self.max_messages)

        self._remove_guild(guild)
        self.dispatch('guild_remove', guild)

    def parse_guild_ban_add(self, data):
        # we make the assumption that GUILD_BAN_ADD is done
        # before GUILD_MEMBER_REMOVE is called
        # hence we don't remove it from cache or do anything
        # strange with it, the main purpose of this event
        # is mainly to dispatch to another event worth listening to for logging
        guild = self._get_guild(int(data['guild_id']))
        if guild is not None:
            try:
                user = User(data=data['user'], state=self)
            except KeyError:
                pass
            else:
                member = guild.get_member(user.id) or user
                self.dispatch('member_ban', guild, member)

    def parse_guild_ban_remove(self, data):
        guild = self._get_guild(int(data['guild_id']))
        if guild is not None:
            if 'user' in data:
                user = self.store_user(data['user'])
                self.dispatch('member_unban', guild, user)

    def parse_guild_role_create(self, data):
        guild = self._get_guild(int(data['guild_id']))
        if guild is None:
            log.warning('GUILD_ROLE_CREATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
            return

        role_data = data['role']
        role = Role(guild=guild, data=role_data, state=self)
        guild._add_role(role)
        self.dispatch('guild_role_create', role)

    def parse_guild_role_delete(self, data):
        guild = self._get_guild(int(data['guild_id']))
        if guild is not None:
            role_id = int(data['role_id'])
            try:
                role = guild._remove_role(role_id)
            except KeyError:
                return
            else:
                self.dispatch('guild_role_delete', role)
        else:
            log.warning('GUILD_ROLE_DELETE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])

    def parse_guild_role_update(self, data):
        guild = self._get_guild(int(data['guild_id']))
        if guild is not None:
            role_data = data['role']
            role_id = int(role_data['id'])
            role = guild.get_role(role_id)
            if role is not None:
                old_role = copy.copy(role)
                role._update(role_data)
                self.dispatch('guild_role_update', old_role, role)
        else:
            log.warning('GUILD_ROLE_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])

    def parse_guild_members_chunk(self, data):
        guild_id = int(data['guild_id'])
        guild = self._get_guild(guild_id)
        members = [Member(guild=guild, data=member, state=self) for member in data.get('members', [])]
        log.debug('Processed a chunk for %s members in guild ID %s.', len(members), guild_id)
        if self._cache_members:
            for member in members:
                existing = guild.get_member(member.id)
                if existing is None or existing.joined_at is None:
                    guild._add_member(member)

        self.process_listeners(ListenerType.chunk, guild, len(members))
        names = [x.name.lower() for x in members]
        self.process_listeners(ListenerType.query_members, (guild_id, names), members)

    def parse_guild_integrations_update(self, data):
        guild = self._get_guild(int(data['guild_id']))
        if guild is not None:
            self.dispatch('guild_integrations_update', guild)
        else:
            log.warning('GUILD_INTEGRATIONS_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])

    def parse_webhooks_update(self, data):
        channel = self.get_channel(int(data['channel_id']))
        if channel is not None:
            self.dispatch('webhooks_update', channel)
        else:
            log.warning('WEBHOOKS_UPDATE referencing an unknown channel ID: %s. Discarding.', data['channel_id'])

    def parse_voice_state_update(self, data):
        guild = self._get_guild(utils._get_as_snowflake(data, 'guild_id'))
        channel_id = utils._get_as_snowflake(data, 'channel_id')
        if guild is not None:
            if int(data['user_id']) == self.user.id:
                voice = self._get_voice_client(guild.id)
                if voice is not None:
                    ch = guild.get_channel(channel_id)
                    if ch is not None:
                        voice.channel = ch

            member, before, after = guild._update_voice_state(data, channel_id)
            if member is not None:
                self.dispatch('voice_state_update', member, before, after)
            else:
                log.warning('VOICE_STATE_UPDATE referencing an unknown member ID: %s. Discarding.', data['user_id'])
        else:
            # in here we're either at private or group calls
            call = self._calls.get(channel_id)
            if call is not None:
                call._update_voice_state(data)

    def parse_voice_server_update(self, data):
        try:
            key_id = int(data['guild_id'])
        except KeyError:
            key_id = int(data['channel_id'])

        vc = self._get_voice_client(key_id)
        if vc is not None:
            asyncio.ensure_future(vc._create_socket(key_id, data))

    def parse_typing_start(self, data):
        channel, guild = self._get_guild_channel(data)
        if channel is not None:
            member = None
            user_id = utils._get_as_snowflake(data, 'user_id')
            if isinstance(channel, DMChannel):
                member = channel.recipient
            elif isinstance(channel, TextChannel) and guild is not None:
                member = guild.get_member(user_id)
            elif isinstance(channel, GroupChannel):
                member = utils.find(lambda x: x.id == user_id, channel.recipients)

            if member is not None:
                timestamp = datetime.datetime.utcfromtimestamp(data.get('timestamp'))
                self.dispatch('typing', channel, member, timestamp)

    def parse_relationship_add(self, data):
        key = int(data['id'])
        old = self.user.get_relationship(key)
        new = Relationship(state=self, data=data)
        self.user._relationships[key] = new
        if old is not None:
            self.dispatch('relationship_update', old, new)
        else:
            self.dispatch('relationship_add', new)

    def parse_relationship_remove(self, data):
        key = int(data['id'])
        try:
            old = self.user._relationships.pop(key)
        except KeyError:
            pass
        else:
            self.dispatch('relationship_remove', old)

    def _get_reaction_user(self, channel, user_id):
        if isinstance(channel, TextChannel):
            return channel.guild.get_member(user_id)
        return self.get_user(user_id)

    def get_reaction_emoji(self, data):
        emoji_id = utils._get_as_snowflake(data, 'id')

        if not emoji_id:
            return data['name']

        try:
            return self._emojis[emoji_id]
        except KeyError:
            return PartialEmoji(animated=data.get('animated', False), id=emoji_id, name=data['name'])

    def _upgrade_partial_emoji(self, emoji):
        emoji_id = emoji.id
        if not emoji_id:
            return emoji.name
        try:
            return self._emojis[emoji_id]
        except KeyError:
            return emoji

    def get_channel(self, id):
        if id is None:
            return None

        pm = self._get_private_channel(id)
        if pm is not None:
            return pm

        for guild in self.guilds:
            channel = guild.get_channel(id)
            if channel is not None:
                return channel

    def create_message(self, *, channel, data):
        return Message(state=self, channel=channel, data=data)

    def receive_chunk(self, guild_id):
        future = self.loop.create_future()
        listener = Listener(ListenerType.chunk, future, lambda s: s.id == guild_id)
        self._listeners.append(listener)
        return future

    def receive_member_query(self, guild_id, query):
        def predicate(args, *, guild_id=guild_id, query=query.lower()):
            request_guild_id, names = args
            return request_guild_id == guild_id and all(n.startswith(query) for n in names)
        future = self.loop.create_future()
        listener = Listener(ListenerType.query_members, future, predicate)
        self._listeners.append(listener)
        return future
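
The chunk/query listeners above pair an asyncio future with a predicate; a minimal standalone sketch of that pattern (the Listener tuple and the simplified process_listeners below are assumptions, not discord.py's real machinery) looks like this:

import asyncio
from collections import namedtuple

# Hypothetical, simplified stand-in for the excerpt's Listener/ListenerType machinery.
Listener = namedtuple('Listener', 'predicate future')

class MiniState:
    def __init__(self, loop):
        self.loop = loop
        self._listeners = []

    def receive_member_query(self, guild_id, query):
        # The future resolves once a (guild_id, names) event satisfies the predicate.
        def predicate(args, *, guild_id=guild_id, query=query.lower()):
            request_guild_id, names = args
            return request_guild_id == guild_id and all(n.startswith(query) for n in names)
        future = self.loop.create_future()
        self._listeners.append(Listener(predicate, future))
        return future

    def process_listeners(self, key, value):
        # Complete and discard every waiting listener whose predicate matches.
        still_waiting = []
        for listener in self._listeners:
            if listener.predicate(key):
                listener.future.set_result(value)
            else:
                still_waiting.append(listener)
        self._listeners = still_waiting

async def main():
    state = MiniState(asyncio.get_running_loop())
    fut = state.receive_member_query(123, 'ab')
    state.process_listeners((123, ['abigail', 'abram']), ['member objects here'])
    print(await fut)   # ['member objects here']

asyncio.run(main())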
Code example #52
0
File: req_set.py Project: 00rain00/ReadJson
class RequirementSet(object):
    def __init__(self, require_hashes=False, check_supported_wheels=True):
        # type: (bool, bool) -> None
        """Create a RequirementSet.
        """

        self.requirements = OrderedDict(
        )  # type: Dict[str, InstallRequirement]  # noqa: E501
        self.require_hashes = require_hashes
        self.check_supported_wheels = check_supported_wheels

        self.unnamed_requirements = []  # type: List[InstallRequirement]
        self.successfully_downloaded = []  # type: List[InstallRequirement]
        self.reqs_to_cleanup = []  # type: List[InstallRequirement]

    def __str__(self):
        # type: () -> str
        requirements = sorted(
            (req for req in self.requirements.values() if not req.comes_from),
            key=lambda req: canonicalize_name(req.name),
        )
        return ' '.join(str(req.req) for req in requirements)

    def __repr__(self):
        # type: () -> str
        requirements = sorted(
            self.requirements.values(),
            key=lambda req: canonicalize_name(req.name),
        )

        format_string = '<{classname} object; {count} requirement(s): {reqs}>'
        return format_string.format(
            classname=self.__class__.__name__,
            count=len(requirements),
            reqs=', '.join(str(req.req) for req in requirements),
        )

    def add_unnamed_requirement(self, install_req):
        # type: (InstallRequirement) -> None
        assert not install_req.name
        self.unnamed_requirements.append(install_req)

    def add_named_requirement(self, install_req):
        # type: (InstallRequirement) -> None
        assert install_req.name

        project_name = canonicalize_name(install_req.name)
        self.requirements[project_name] = install_req

    def add_requirement(
        self,
        install_req,  # type: InstallRequirement
        parent_req_name=None,  # type: Optional[str]
        extras_requested=None  # type: Optional[Iterable[str]]
    ):
        # type: (...) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]]  # noqa: E501
        """Add install_req as a requirement to install.

        :param parent_req_name: The name of the requirement that needed this
            added. The name is used because when multiple unnamed requirements
            resolve to the same name, we could otherwise end up with dependency
            links that point outside the Requirements set. parent_req must
            already be added. Note that None implies that this is a user
            supplied requirement, vs an inferred one.
        :param extras_requested: an iterable of extras used to evaluate the
            environment markers.
        :return: Additional requirements to scan. That is either [] if
            the requirement is not applicable, or [install_req] if the
            requirement is applicable and has just been added.
        """
        # If the markers do not match, ignore this requirement.
        if not install_req.match_markers(extras_requested):
            logger.info(
                "Ignoring %s: markers '%s' don't match your environment",
                install_req.name,
                install_req.markers,
            )
            return [], None

        # If the wheel is not supported, raise an error.
        # Should check this after filtering out based on environment markers to
        # allow specifying different wheels based on the environment/OS, in a
        # single requirements file.
        if install_req.link and install_req.link.is_wheel:
            wheel = Wheel(install_req.link.filename)
            tags = pep425tags.get_supported()
            if (self.check_supported_wheels and not wheel.supported(tags)):
                raise InstallationError(
                    "%s is not a supported wheel on this platform." %
                    wheel.filename)

        # This next bit is really a sanity check.
        assert install_req.is_direct == (parent_req_name is None), (
            "a direct req shouldn't have a parent and also, "
            "a non direct req should have a parent")

        # Unnamed requirements are scanned again and the requirement won't be
        # added as a dependency until after scanning.
        if not install_req.name:
            self.add_unnamed_requirement(install_req)
            return [install_req], None

        try:
            existing_req = self.get_requirement(install_req.name)
        except KeyError:
            existing_req = None

        has_conflicting_requirement = (
            parent_req_name is None and existing_req
            and not existing_req.constraint
            and existing_req.extras == install_req.extras
            and existing_req.req.specifier != install_req.req.specifier)
        if has_conflicting_requirement:
            raise InstallationError(
                "Double requirement given: %s (already in %s, name=%r)" %
                (install_req, existing_req, install_req.name))

        # When no existing requirement exists, add the requirement as a
        # dependency and it will be scanned again after.
        if not existing_req:
            self.add_named_requirement(install_req)
            # We'd want to rescan this requirement later
            return [install_req], install_req

        # Assume there's no need to scan, and that we've already
        # encountered this for scanning.
        if install_req.constraint or not existing_req.constraint:
            return [], existing_req

        does_not_satisfy_constraint = (
            install_req.link
            and not (existing_req.link
                     and install_req.link.path == existing_req.link.path))
        if does_not_satisfy_constraint:
            self.reqs_to_cleanup.append(install_req)
            raise InstallationError(
                "Could not satisfy constraints for '%s': "
                "installation from path or url cannot be "
                "constrained to a version" % install_req.name, )
        # If we're now installing a constraint, mark the existing
        # object for real installation.
        existing_req.constraint = False
        existing_req.extras = tuple(
            sorted(set(existing_req.extras) | set(install_req.extras)))
        logger.debug(
            "Setting %s extras to: %s",
            existing_req,
            existing_req.extras,
        )
        # Return the existing requirement for addition to the parent and
        # scanning again.
        return [existing_req], existing_req

    def has_requirement(self, name):
        # type: (str) -> bool
        project_name = canonicalize_name(name)

        return (project_name in self.requirements
                and not self.requirements[project_name].constraint)

    def get_requirement(self, name):
        # type: (str) -> InstallRequirement
        project_name = canonicalize_name(name)

        if project_name in self.requirements:
            return self.requirements[project_name]

        raise KeyError("No project with the name %r" % name)

    def cleanup_files(self):
        # type: () -> None
        """Clean up files, remove builds."""
        logger.debug('Cleaning up...')
        with indent_log():
            for req in self.reqs_to_cleanup:
                req.remove_temporary_source()
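
Everything in self.requirements is keyed through canonicalize_name (pip vendors it from the packaging project), so lookups are case- and separator-insensitive:

from packaging.utils import canonicalize_name

assert canonicalize_name('Django_Rest-Framework') == 'django-rest-framework'
assert canonicalize_name('Requests') == canonicalize_name('requests')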
Code example #53
0
                test_batch.append(utils.sample_episodes_eval_reacher(env, model, ensemble_size, MAX_STEPS, SPARSE_REWARD, SCREEN_SHOT, use_cuda, device))

            # get the test_batch stats
            (test_reward_mean_dict[agent_start],
             test_reward_std_dict[agent_start],
             test_reward_max_dict[agent_start],
             test_reward_accum_dict[agent_start],
             test_step_counts_mean_dict[agent_start],
             test_step_counts_std_dict[agent_start],
             test_elite_ratio_dict[agent_start]) = utils.get_batch_stats_eval(
                 test_batch, PERCENTILE)

            elapsed_time = time.time() - start_time
            print('======================== State {}/{} ========================'.format(state_num, len(agent_start_list)))
            print(
                "%d: test_reward_mean=%.5f, test_reward_std=%.5f, test_elite_ratio=%.3f, \tElapsed:%s" % (
                    gan_epoch, test_reward_mean_dict[agent_start], test_reward_std_dict[agent_start], test_elite_ratio_dict[agent_start],
                    time.strftime("%H:%M:%S", time.gmtime(elapsed_time))))

        test_duration = time.time() - test_start

        # Aggregate per-start-state stats; the std and max entries are reduced
        # with np.max (a conservative, worst-case summary), the rest with mean/sum.
        test_reward_mean = np.mean(list(test_reward_mean_dict.values()))
        test_reward_std = np.max(list(test_reward_std_dict.values()))
        test_reward_max = np.max(list(test_reward_max_dict.values()))
        test_reward_accum = np.sum(list(test_reward_accum_dict.values()))
        test_step_counts_mean = np.mean(list(test_step_counts_mean_dict.values()))
        test_step_counts_std = np.max(list(test_step_counts_std_dict.values()))
        test_elite_ratio = np.mean(list(test_elite_ratio_dict.values()))

        print('================================= Test Sample Complete \tDuration:%s ======================================='% (time.strftime("%H:%M:%S", time.gmtime(test_duration))))

        # sample episodes from current msCEM-policy and train a step
        print('======================== Sampling and training msCEM (1 step) ========================')
        train_start_cem = time.time()

        # if multivariate CEM distribution too narrow, reinitialize
        theta_std_mean = np.mean(theta_std)
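
The excerpt cuts off right before the reinitialization branch; a plausible continuation (STD_THRESHOLD and initial_std are assumed names, not from the source) would be:

STD_THRESHOLD = 1e-2    # assumed hyperparameter
initial_std = 1.0       # assumed reset value

if theta_std_mean < STD_THRESHOLD:
    # Widen the collapsed CEM search distribution so exploration can resume.
    theta_std = np.full_like(theta_std, initial_std)
    print('CEM std collapsed (mean %.4f); reinitialized to %.2f'
          % (theta_std_mean, initial_std))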
Code example #54
0
File: keymap.py Project: vikrant1717/plover
class Keymap:

    def __init__(self, keys, actions):
        # List of supported actions.
        self._actions = OrderedDict((action, n)
                                    for n, action
                                    in enumerate(actions))
        self._actions['no-op'] = len(self._actions)
        # List of supported keys.
        self._keys = OrderedDict((key, n)
                                 for n, key
                                 in enumerate(keys))
        # action -> keys
        self._mappings = {}
        # key -> action
        self._bindings = {}

    def get_keys(self):
        return self._keys.keys()

    def get_actions(self):
        return self._actions.keys()

    def set_bindings(self, bindings):
        # Set from:
        # { key1: action1, key2: action1, ... keyn: actionn }
        mappings = defaultdict(list)
        for key, action in dict(bindings).items():
            mappings[action].append(key)
        self.set_mappings(mappings)

    def set_mappings(self, mappings):
        # When setting from a string, assume a list of mappings:
        # [[action1, [key1, key2]], [action2, [key3]], ...]
        if isinstance(mappings, str):
            mappings = json.loads(mappings)
        mappings = dict(mappings)
        # Set from:
        # { action1: [key1, key2], ... actionn: [keyn] }
        self._mappings = OrderedDict()
        self._bindings = {}
        bound_keys = defaultdict(list)
        errors = []
        for action in self._actions:
            key_list = mappings.get(action)
            if not key_list:
                # Not an issue if 'no-op' is not mapped...
                if action != 'no-op':
                    errors.append('action %s is not bound' % action)
                # Add dummy mapping for each missing action
                # so it's shown in the configurator.
                self._mappings[action] = ()
                continue
            if isinstance(key_list, str):
                key_list = (key_list,)
            valid_key_list = []
            for key in key_list:
                if key not in self._keys:
                    errors.append('invalid key %s bound to action %s' % (key, action))
                    continue
                valid_key_list.append(key)
                bound_keys[key].append(action)
                self._bindings[key] = action
            self._mappings[action] = tuple(sorted(valid_key_list, key=self._keys.get))
        for action in (set(mappings) - set(self._actions)):
            key_list = mappings.get(action)
            if isinstance(key_list, str):
                key_list = (key_list,)
            errors.append('invalid action %s mapped to key(s) %s' % (action, ' '.join(key_list)))
        for key, action_list in bound_keys.items():
            if len(action_list) > 1:
                errors.append('key %s is bound multiple times: %s' % (key, str(action_list)))
        if len(errors) > 0:
            log.warning('Keymap is invalid, behavior undefined:\n\n- ' + '\n- '.join(errors))

    def get_bindings(self):
        return self._bindings

    def get_mappings(self):
        return self._mappings

    def get_action(self, key, default=None):
        return self._bindings.get(key, default)

    def keys_to_actions(self, key_list):
        action_list = []
        for key in key_list:
            assert key in self._keys, "'%s' not in %s" % (key, self._keys)
            action = self._bindings[key]
            if 'no-op' != action:
                action_list.append(action)
        return action_list

    def keys(self):
        return self._mappings.keys()

    def values(self):
        return self._mappings.values()

    def __len__(self):
        return len(self._mappings)

    def __getitem__(self, key):
        return self._mappings[key]

    def __setitem__(self, action, key_list):
        assert action in self._actions
        if isinstance(key_list, str):
            key_list = (key_list,)
        # Delete previous bindings.
        if action in self._mappings:
            for old_key in self._mappings[action]:
                if old_key in self._bindings:
                    del self._bindings[old_key]
        errors = []
        valid_key_list = []
        for key in key_list:
            if key not in self._keys:
                errors.append('invalid key %s bound to action %s' % (key, action))
                continue
            if key in self._bindings:
                errors.append('key %s is already bound to: %s' % (key, self._bindings[key]))
                continue
            valid_key_list.append(key)
            self._bindings[key] = action
        self._mappings[action] = tuple(sorted(valid_key_list, key=self._keys.get))
        if len(errors) > 0:
            log.warning('Keymap is invalid, behavior undefined:\n\n- ' + '\n- '.join(errors))

    def __iter__(self):
        return iter(self._mappings)

    def __eq__(self, other):
        return self.get_mappings() == other.get_mappings()

    def __str__(self):
        # Use the more compact list of mappings format:
        # [[action1, [key1, key2]], [action2, [key3]], ...]
        return json.dumps(list(self._mappings.items()))
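
A quick usage sketch of the class above (the key and action names are made up; the class's own module imports -- OrderedDict, defaultdict, json, log -- are assumed to be in scope):

keymap = Keymap(keys=['a', 's', 'd'], actions=['copy', 'paste'])
keymap.set_bindings({'a': 'copy', 's': 'copy', 'd': 'paste'})

print(keymap['copy'])                      # ('a', 's') -- ordered by key index
print(keymap.get_action('d'))              # 'paste'
print(keymap.keys_to_actions(['a', 'd']))  # ['copy', 'paste']
print(keymap)                              # JSON list-of-mappings form, via __str__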
Code example #55
0
class Netlist:
    """This class implements a base class for a netlist.

    .. note:: This class is completed with element shortcuts when the module is loaded.

    """

    _logger = _module_logger.getChild('Netlist')

    ##############################################

    def __init__(self):

        self._ground_name = 0
        self._nodes = {}
        self._ground_node = self._add_node(self._ground_name)

        self._subcircuits = OrderedDict()  # to keep the declaration order
        self._elements = OrderedDict()  # to keep the declaration order
        self._models = {}

        self.raw_spice = ''

        # self._graph = networkx.Graph()

    ##############################################

    def copy_to(self, netlist):

        for subcircuit in self.subcircuits:
            netlist.subcircuit(subcircuit)

        for element in self.elements:
            element.copy_to(netlist)

        for name, model in self._models.items():
            netlist._models[name] = model.clone()

        netlist.raw_spice = str(self.raw_spice)

        return netlist

    ##############################################

    @property
    def gnd(self):
        return self._ground_node

    @property
    def nodes(self):
        return self._nodes.values()

    @property
    def node_names(self):
        return self._nodes.keys()

    @property
    def elements(self):
        return self._elements.values()

    @property
    def element_names(self):
        return self._elements.keys()

    @property
    def models(self):
        return self._models.values()

    @property
    def model_names(self):
        return self._models.keys()

    @property
    def subcircuits(self):
        return self._subcircuits.values()

    @property
    def subcircuit_names(self):
        return self._subcircuits.keys()

    ##############################################

    def element(self, name):
        return self._elements[name]

    def model(self, name):
        return self._models[name]

    def node(self, name):
        return self._nodes[name]

    ##############################################

    def __getitem__(self, attribute_name):

        if attribute_name in self._elements:
            return self.element(attribute_name)
        elif attribute_name in self._models:
            return self.model(attribute_name)
        # Fixme: subcircuits
        elif attribute_name in self._nodes:
            return self.node(attribute_name)
        else:
            raise IndexError(attribute_name)  # KeyError

    ##############################################

    def __getattr__(self, attribute_name):
        try:
            return self.__getitem__(attribute_name)
        except IndexError:
            raise AttributeError(attribute_name)

    ##############################################

    def _add_node(self, node_name):
        node_name = str(node_name)
        if node_name not in self._nodes:
            node = Node(self, node_name)
            self._nodes[node_name] = node
            return node
        else:
            raise ValueError("Node {} is already defined".format(node_name))

    ##############################################

    def _update_node_name(self, node, new_name):
        if node.name not in self._nodes:
            # should not happen
            raise ValueError("Unknown node")
        del self._nodes[node.name]
        self._nodes[new_name] = node

    ##############################################

    def get_node(self, node, create=False):
        if isinstance(node, Node):
            return node
        else:
            str_node = str(node)
            if str_node in self._nodes:
                return self._nodes[str_node]
            elif create:
                return self._add_node(str_node)
            else:
                raise KeyError("Node {} doesn't exists".format(node))

    ##############################################

    def has_ground_node(self):
        return bool(self._ground_node)

    ##############################################

    def _add_element(self, element):
        """Add an element."""
        if element.name not in self._elements:
            self._elements[element.name] = element
        else:
            raise NameError("Element name {} is already defined".format(
                element.name))

    ##############################################

    def _remove_element(self, element):
        try:
            del self._elements[element.name]
        except KeyError:
            raise NameError(
                "Cannot remove undefined element {}".format(element))

    ##############################################

    def add_model(self, name, model_type, **parameters):
        """Add a model.

        (Renamed here from ``model``, with the ``modele_type`` typo fixed,
        so it no longer shadows the ``model(name)`` accessor above, which
        ``__getitem__`` depends on.)
        """
        model = DeviceModel(name, model_type, **parameters)
        if model.name not in self._models:
            self._models[model.name] = model
        else:
            raise NameError("Model name {} is already defined".format(name))

        return model

    ##############################################

    def subcircuit(self, subcircuit):
        """Add a sub-circuit."""
        # Fixme: subcircuit is a class
        self._subcircuits[str(subcircuit.name)] = subcircuit

    ##############################################

    def __str__(self):
        """ Return the formatted list of element and model definitions. """
        # Fixme: order ???
        netlist = self._str_raw_spice()
        netlist += self._str_subcircuits()  # before elements
        netlist += self._str_elements()
        netlist += self._str_models()
        return netlist

    ##############################################

    def _str_elements(self):
        elements = [element for element in self.elements if element.enabled]
        return join_lines(elements) + os.linesep

    ##############################################

    def _str_models(self):
        if self._models:
            return join_lines(self.models) + os.linesep
        else:
            return ''

    ##############################################

    def _str_subcircuits(self):
        if self._subcircuits:
            return join_lines(self.subcircuits)
        else:
            return ''

    ##############################################

    def _str_raw_spice(self):
        netlist = self.raw_spice
        if netlist and not netlist.endswith(os.linesep):
            netlist += os.linesep
        return netlist
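
A sketch of the node bookkeeping above, with a stub standing in for the module's Node class (the real Node, _module_logger, and join_lines live outside this excerpt):

class Node:                                  # stub; the real Node lives in the same module
    def __init__(self, netlist, name):
        self.name = name

netlist = Netlist()
assert netlist.has_ground_node()             # ground node '0' is created in __init__

n_in = netlist.get_node('in', create=True)   # created on demand
assert netlist.get_node('in') is n_in        # later lookups return the same object
assert netlist.get_node(n_in) is n_in        # Node instances pass through unchanged
print(list(netlist.node_names))              # ['0', 'in']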
Code example #56
0
File: edit_dialog.py Project: jni/Enrich2
class EditDialog(tkSimpleDialog.Dialog):
    """
    Dialog box for editing elements. Also used to set properties on newly-created elements.

    *parent_window* is the Tk window that owns this child window
    *tree* is the object containing the config tree and associated Treeview
    *element* is the config tree element being edited, or the newly-created element being configured for the first time
    """
    def __init__(self, parent_window, tree, element, title="Configure Object"):
        self.tree = tree
        self.element = element
        self.element_cfg = None
        self.frame_dict = OrderedDict()

        # create the editable version of the config object
        self.element_cfg = self.element.serialize()

        # dialog options common to all elements
        self.frame_dict['main'] = list()
        self.name_entry = StringEntry("Name", self.element_cfg, 'name', optional=False)
        self.frame_dict['main'].append(self.name_entry)
        if 'output directory' in self.element_cfg:
            self.frame_dict['main'].append(FileEntry("Output Directory", self.element_cfg, 'output directory', optional=self.element != self.tree.root_element, directory=True))
        if isinstance(self.element, SeqLib):
            self.frame_dict['fastq'] = list()
            self.frame_dict['filters'] = list()

            self.frame_dict['main'].append(SectionLabel("SeqLib Options"))
            self.frame_dict['main'].append(IntegerEntry("Time Point", self.element_cfg, 'timepoint'))
            self.frame_dict['main'].append(FileEntry("Counts File", self.element_cfg, 'counts file', optional=True, extensions=[".h5"]))

            self.frame_dict['filters'].append(SectionLabel("FASTQ Filtering"))
            # Removed chastity filtering option, due to issues with specifying a FASTQ header format
            # self.frame_dict['filters'].append(Checkbox("Chastity", self.element_cfg['fastq']['filters'], 'chastity'))
            self.frame_dict['filters'].append(IntegerEntry("Minimum Quality", self.element_cfg['fastq']['filters'], 'min quality', optional=True))
            self.frame_dict['filters'].append(IntegerEntry("Average Quality", self.element_cfg['fastq']['filters'], 'avg quality', optional=True))
            self.frame_dict['filters'].append(IntegerEntry("Maximum N's", self.element_cfg['fastq']['filters'], 'max N', optional=True))

            self.frame_dict['fastq'].append(SectionLabel("FASTQ Options"))
            if isinstance(self.element, OverlapSeqLib):
                self.frame_dict['fastq'].append(FileEntry("Forward Reads", self.element_cfg['fastq'], 'forward reads', extensions=_FASTQ_SUFFIXES))
                self.frame_dict['fastq'].append(FileEntry("Reverse Reads", self.element_cfg['fastq'], 'reverse reads', extensions=_FASTQ_SUFFIXES))
                self.frame_dict['overlap'] = list()
                self.frame_dict['overlap'].append(IntegerEntry("Forward Start", self.element_cfg['overlap'], 'forward start', minvalue=1))
                self.frame_dict['overlap'].append(IntegerEntry("Reverse Start", self.element_cfg['overlap'], 'reverse start', minvalue=1))
                self.frame_dict['overlap'].append(IntegerEntry("Overlap Length", self.element_cfg['overlap'], 'length', minvalue=1))
                self.frame_dict['overlap'].append(IntegerEntry("Maximum Mismatches", self.element_cfg['overlap'], 'max mismatches'))
                self.frame_dict['overlap'].append(Checkbox("Overlap Only", self.element_cfg['overlap'], 'trim'))
                self.frame_dict['filters'].append(Checkbox("Remove Unresolvable Overlaps", self.element_cfg['fastq']['filters'], 'remove unresolvable'))
            else:
                self.frame_dict['fastq'].append(FileEntry("Reads", self.element_cfg['fastq'], 'reads', extensions=_FASTQ_SUFFIXES))
                self.frame_dict['fastq'].append(Checkbox("Reverse", self.element_cfg['fastq'], 'reverse'))

            if isinstance(self.element, BarcodeSeqLib):
                self.frame_dict['barcodes'] = list()
                self.frame_dict['barcodes'].append(SectionLabel("Barcode Options"))
                if isinstance(self.element, BcvSeqLib) or isinstance(self.element, BcidSeqLib):
                    self.frame_dict['barcodes'].append(FileEntry("Barcode-variant File", self.element_cfg['barcodes'], 'map file'))
                self.frame_dict['barcodes'].append(IntegerEntry("Minimum Count", self.element_cfg['barcodes'], 'min count', optional=True))
                self.frame_dict['barcodes'].append(IntegerEntry("Trim Start", self.element_cfg['fastq'], 'start', optional=True, minvalue=1))
                self.frame_dict['barcodes'].append(IntegerEntry("Trim Length", self.element_cfg['fastq'], 'length', optional=True, minvalue=1))
            
            if isinstance(self.element, BcidSeqLib):
                self.frame_dict['identifiers'] = list()
                self.frame_dict['identifiers'].append(SectionLabel("Identifier Options"))
                self.frame_dict['identifiers'].append(IntegerEntry("Minimum Count", self.element_cfg['identifiers'], 'min count', optional=True))

            if isinstance(self.element, VariantSeqLib):
                self.frame_dict['variants'] = list()
                self.frame_dict['variants'].append(SectionLabel("Variant Options"))
                self.frame_dict['variants'].append(StringEntry("Wild Type Sequence", self.element_cfg['variants']['wild type'], 'sequence'))
                self.frame_dict['variants'].append(IntegerEntry("Wild Type Offset", self.element_cfg['variants']['wild type'], 'reference offset', optional=True, minvalue=-maxint - 1))
                self.frame_dict['variants'].append(Checkbox("Protein Coding", self.element_cfg['variants']['wild type'], 'coding'))
                self.frame_dict['variants'].append(Checkbox("Use Aligner", self.element_cfg['variants'], 'use aligner'))
                self.frame_dict['variants'].append(IntegerEntry("Minimum Count", self.element_cfg['variants'], 'min count', optional=True))
                self.frame_dict['filters'].append(IntegerEntry("Maximum Mutations", self.element_cfg['fastq']['filters'], 'max mutations', optional=True))

        tkSimpleDialog.Dialog.__init__(self, parent_window, title)


    def body(self, master):
        """
        Add the UI elements to the edit window. Ordering and placement of UI 
        elements in columns is defined by the ``element_layouts`` dictionary.
        """
        main = ttk.Frame(master, padding=(3, 3, 12, 12))
        main.grid(row=0, column=0, sticky="nsew")

        layout = element_layouts[type(self.element).__name__]
        for i, column_tuple in enumerate(layout):
            new_frame = ttk.Frame(master, padding=(3, 3, 12, 12))
            new_frame.grid(row=0, column=i, sticky="nsew")
            row_no = 0
            for row_frame_key in column_tuple:
                for ui_element in self.frame_dict[row_frame_key]:
                    row_no += ui_element.body(new_frame, row_no, left=True)


    def validate(self):
        """
        Called when the user chooses "OK", before closing the box.

        Also checks that child name is unique.
        """
        for tk_list in self.frame_dict.values():
            if not all(x.validate() for x in tk_list):
                return False

        if self.element.parent is not None:
            if self.element not in self.element.parent.children:
                if self.name_entry.value.get() in self.element.parent.child_names():
                    tkMessageBox.showwarning("", "Sibling names must be unique.")
                    return False

        return True


    def apply(self):
        """
        Called when the user chooses "OK" and the box closes.
        """
        # apply all changes to the config object
        for tk_list in self.frame_dict.values():
            for tk_element in tk_list:
                tk_element.apply()

        # use the configuration dictionary to change values
        if isinstance(self.element, SeqLib):
            self.element.configure(clear_nones(self.element_cfg))
        else:
            self.element.configure(clear_nones(self.element_cfg), configure_children=False)

        # insert into the object if necessary
        if self.element.parent is not None:
            if self.element not in self.element.parent.children:
                self.element.parent.add_child(self.element)
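
apply() strips unset optional fields with clear_nones before reconfiguring the element; that helper is not part of this excerpt, so the following is only a plausible sketch of what it does:

def clear_nones(value):
    # Hypothetical sketch: recursively drop None entries so optional fields
    # left blank in the dialog are omitted from the config entirely.
    if isinstance(value, dict):
        return {k: clear_nones(v) for k, v in value.items() if v is not None}
    if isinstance(value, list):
        return [clear_nones(v) for v in value if v is not None]
    return value

print(clear_nones({'name': 'lib1', 'counts file': None,
                   'fastq': {'filters': {'min quality': None, 'max N': 5}}}))
# {'name': 'lib1', 'fastq': {'filters': {'max N': 5}}}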
Code example #57
0
    class Step(object):
        def __init__(self, printer, step):
            self.printer = printer
            self.step = step
            self.finished = False
            self.reprint = False
            self.substeps = OrderedDict()
            self.args = step.args_str
            if step.start_event:
                self.start_ts = datetime.datetime.strptime(
                    step.start_event.stamp, "%Y-%m-%dT%H:%M:%S.%f")
            else:
                self.start_ts = None

            if step.skipped:
                self.finished = True

        def _find_running_substep(self):
            for substep in self.substeps.values():
                if not substep.finished:
                    return substep
            return self

        def start_runner_substep(self, step):
            substep = self._find_running_substep()
            substep.substeps[step.jid] = SP.Runner(self.printer, step)

        def start_state_substep(self, step):
            substep = self._find_running_substep()
            substep.substeps[step.jid] = SP.State(self.printer, step)

        def finish_substep(self, step):
            if step.jid in self.substeps:
                self.substeps[step.jid].finished = True
                return True
            for substep in self.substeps.values():
                if substep.finish_substep(step):
                    return True
            return False

        # pylint: disable=W0613
        def print(self, offset, desc_width, depth):
            """
            Prints the status of a step
            """
            # if not self.reprint:
            #     self.reprint = True
            # else:
            #     self.clean(desc_width)

        def clean(self, desc_width):
            """
            Prepare for re-print of step
            """
            raise NotImplementedError()

        @staticmethod
        def ftime(tr):
            if tr.seconds > 0:
                return "{}s".format(
                    int(round(tr.seconds + tr.microseconds / 1000000.0)))
            else:
                return "{}s".format(
                    round(tr.seconds + tr.microseconds / 1000000.0, 1))
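
ftime renders whole-second durations as an integer and sub-second ones with one decimal; for example (with the Step class above in scope -- it is nested inside an enclosing class in the original):

import datetime

print(Step.ftime(datetime.timedelta(seconds=3, microseconds=400000)))   # '3s'
print(Step.ftime(datetime.timedelta(microseconds=250000)))              # '0.2s'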
Code example #58
0
File: blob.py Project: hadihafizi/datajoint-python
mxClassID = OrderedDict((  # MATLAB mxClassID -> NumPy dtype (leading entries truncated in this excerpt)
    ('mxLOGICAL_CLASS', np.dtype('bool')),
    ('mxCHAR_CLASS', np.dtype('c')),
    ('mxVOID_CLASS', None),
    ('mxDOUBLE_CLASS', np.dtype('float64')),
    ('mxSINGLE_CLASS', np.dtype('float32')),
    ('mxINT8_CLASS', np.dtype('int8')),
    ('mxUINT8_CLASS', np.dtype('uint8')),
    ('mxINT16_CLASS', np.dtype('int16')),
    ('mxUINT16_CLASS', np.dtype('uint16')),
    ('mxINT32_CLASS', np.dtype('int32')),
    ('mxUINT32_CLASS', np.dtype('uint32')),
    ('mxINT64_CLASS', np.dtype('int64')),
    ('mxUINT64_CLASS', np.dtype('uint64')),
    ('mxFUNCTION_CLASS', None)))

rev_class_id = {dtype: i for i, dtype in enumerate(mxClassID.values())}
dtype_list = list(mxClassID.values())

decode_lookup = {
    b'ZL123\0': zlib.decompress
}
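
rev_class_id inverts the table so a NumPy dtype can be mapped back to its MATLAB class index when serializing, and dtype_list recovers the dtype from an index read out of a blob:

import numpy as np

i = rev_class_id[np.dtype('float64')]          # index of mxDOUBLE_CLASS in the table
assert dtype_list[i] == np.dtype('float64')    # round-trips through the index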


class BlobReader:
    def __init__(self, blob, simplify=False):
        self._simplify = simplify
        self._blob = blob
        self._pos = 0

    @property
    def pos(self):
        return self._pos
Code example #59
0
    def get_nav_menu(self):
        site_menu = list(self.get_site_menu() or [])
        had_urls = []

        def get_url(menu, had_urls):
            if 'url' in menu:
                had_urls.append(menu['url'])
            if 'menus' in menu:
                for m in menu['menus']:
                    get_url(m, had_urls)

        get_url({'menus': site_menu}, had_urls)

        nav_menu = OrderedDict()

        for model, model_admin in self.admin_site._registry.items():
            if getattr(model_admin, 'hidden_menu', False):
                continue
            app_label = model._meta.app_label
            app_icon = None
            model_dict = {
                'title': smart_text(capfirst(model._meta.verbose_name_plural)),
                'url': self.get_model_url(model, "changelist"),
                'icon': self.get_model_icon(model),
                'perm': self.get_model_perm(model, 'view'),
                'order': model_admin.order,
            }
            if model_dict['url'] in had_urls:
                continue

            app_key = "app:%s" % app_label
            if app_key in nav_menu:
                nav_menu[app_key]['menus'].append(model_dict)
            else:
                # Find the app title (the if/else always assigns it, so no
                # default is needed).
                if app_label.lower() in self.apps_label_title:
                    app_title = self.apps_label_title[app_label.lower()]
                else:
                    app_title = smart_text(
                        apps.get_app_config(app_label).verbose_name)
                # Find the app icon.
                if app_label.lower() in self.apps_icons:
                    app_icon = self.apps_icons[app_label.lower()]

                nav_menu[app_key] = {
                    'title': app_title,
                    'menus': [model_dict],
                }

            app_menu = nav_menu[app_key]
            if app_icon:
                app_menu['first_icon'] = app_icon
            elif ('first_icon' not in app_menu or app_menu['first_icon']
                  == self.default_model_icon) and model_dict.get('icon'):
                app_menu['first_icon'] = model_dict['icon']

            if 'first_url' not in app_menu and model_dict.get('url'):
                app_menu['first_url'] = model_dict['url']

        for menu in nav_menu.values():
            menu['menus'].sort(key=sortkeypicker(['order', 'title']))

        nav_menu = list(nav_menu.values())
        # nav_menu.sort(key=lambda x: x['title'])

        site_menu.extend(nav_menu)

        return site_menu
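
The final sort uses sortkeypicker, which is not shown in this excerpt; a simplified sketch (xadmin's real helper, in xadmin.util, additionally supports a '-key' prefix for descending order) is:

def sortkeypicker(keynames):
    # Build a sort key returning the named fields in order.
    def key(item):
        return [item[k] for k in keynames]
    return key

menus = [{'order': 2, 'title': 'Users'}, {'order': 1, 'title': 'Groups'}]
menus.sort(key=sortkeypicker(['order', 'title']))
print([m['title'] for m in menus])   # ['Groups', 'Users']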
Code example #60
0
	def key(self, vect):
		# Order the vectorizer's vocabulary by feature index and return the
		# indices and terms as parallel lists (dict views would not be
		# indexable under Python 3).
		dic = vect.vocabulary_
		sorted_x = OrderedDict(sorted(dic.items(), key=lambda t: t[1]))
		index = list(sorted_x.values())
		keys = list(sorted_x.keys())
		return index, keys
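
A usage sketch with scikit-learn's CountVectorizer; since self is unused, the method body is reproduced as a standalone function here:

from collections import OrderedDict
from sklearn.feature_extraction.text import CountVectorizer

def key(vect):
    # Standalone copy of the method above.
    dic = vect.vocabulary_
    sorted_x = OrderedDict(sorted(dic.items(), key=lambda t: t[1]))
    return list(sorted_x.values()), list(sorted_x.keys())

vect = CountVectorizer().fit(['the cat sat', 'the dog ran'])
index, keys = key(vect)
print(keys)    # ['cat', 'dog', 'ran', 'sat', 'the'] -- terms ordered by feature index
print(index)   # [0, 1, 2, 3, 4]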