コード例 #1
0
    def get_header(self, data, renderer_context):
        """Return the list of header fields, determined by class settings and context."""

        # A header supplied via the renderer context wins over the
        # class-level default.
        header = renderer_context.get('header', self.header)

        if header is None:
            # No preset header: every key seen in the flattened data is a
            # candidate field.
            candidates = set()
            data = self.flatten_data(data)
            for item in data:
                candidates.update(item.keys())
            # flatten_data() yields fields in no particular order, so sort
            # alphabetically for a stable default.
            header = sorted(candidates)

        # The request may narrow the header via a query parameter.
        request = renderer_context.get('request')
        if request is not None and self.fields_param is not None:
            request_fields = request.query_params.get(self.fields_param)
            if request_fields is not None:
                requested = OrderedSet()
                for field in request_fields.split(self.fields_sep):
                    # Silently drop anything not in the candidate header.
                    if field in header:
                        requested.add(field)
                header = requested

        return header
コード例 #2
0
    def get_header(self, data, renderer_context):
        """Return the list of header fields, determined by class settings and context."""

        # Context-supplied header takes precedence over the class default.
        header = renderer_context.get('header', self.header)

        if header is None:
            # Derive candidates from the data itself: the union of all item
            # keys, alphabetized because flatten_data() gives no
            # deterministic field order.
            data = self.flatten_data(data)
            header = sorted({key for item in data for key in item.keys()})

        # Optionally narrow down to the fields requested via query params.
        request = renderer_context.get('request')
        if request is not None and self.fields_param is not None:
            request_fields = request.query_params.get(self.fields_param)
            if request_fields is not None:
                wanted = OrderedSet()
                for name in request_fields.split(self.fields_sep):
                    # Fields outside the candidate header are ignored.
                    if name in header:
                        wanted.update((name, ))
                header = wanted

        return header
コード例 #3
0
ファイル: pandas_transformer.py プロジェクト: vemonet/kgx
    def _order_edge_columns(cols: Set) -> OrderedSet:
        """
        Arrange edge columns in a defined order.

        Core edge columns come first in a fixed sequence; all remaining
        columns follow in their original iteration order.

        Parameters
        ----------
        cols: Set
            A set with elements in any order

        Returns
        -------
        OrderedSet
            A set with elements in a defined order

        """
        leftover = cols.copy()
        ordered = OrderedSet()
        for column in ('id', 'subject', 'edge_label', 'object', 'relation',
                       'provided_by'):
            if column in leftover:
                ordered.add(column)
                leftover.remove(column)
        ordered.update(leftover)
        return ordered
コード例 #4
0
ファイル: tsv_sink.py プロジェクト: STARInformatics/kgx
    def _order_node_columns(cols: Set) -> OrderedSet:
        """
        Arrange node columns in a defined order.

        Core node columns come first in a fixed sequence, then the
        remaining regular columns alphabetically, then internal
        (underscore-prefixed) columns alphabetically.

        Parameters
        ----------
        cols: Set
            A set with elements in any order

        Returns
        -------
        OrderedSet
            A set with elements in a defined order

        """
        leftover = cols.copy()
        ordered = OrderedSet()
        for column in ('id', 'category', 'name', 'description', 'xref',
                       'provided_by', 'synonym'):
            if column in leftover:
                ordered.add(column)
                leftover.remove(column)
        # Internal columns are those starting with an underscore.
        internal = {c for c in leftover if c.startswith('_')}
        regular = {c for c in leftover if not c.startswith('_')}
        ordered.update(sorted(regular))
        ordered.update(sorted(internal))
        return ordered
コード例 #5
0
ファイル: tsv_sink.py プロジェクト: STARInformatics/kgx
    def _order_edge_columns(cols: Set) -> OrderedSet:
        """
        Arrange edge columns in a defined order.

        Core edge columns come first in a fixed sequence, then the
        remaining regular columns alphabetically, then internal
        (underscore-prefixed) columns alphabetically.

        Parameters
        ----------
        cols: Set
            A set with elements in any order

        Returns
        -------
        OrderedSet
            A set with elements in a defined order

        """
        leftover = cols.copy()
        ordered = OrderedSet()
        for column in ('id', 'subject', 'predicate', 'object', 'category',
                       'relation', 'provided_by'):
            if column in leftover:
                ordered.add(column)
                leftover.remove(column)
        # Internal columns are those starting with an underscore.
        internal = {c for c in leftover if c.startswith('_')}
        regular = {c for c in leftover if not c.startswith('_')}
        ordered.update(sorted(regular))
        ordered.update(sorted(internal))
        return ordered
コード例 #6
0
ファイル: test.py プロジェクト: bmwant/ordered-set
def test_update():
    """update() adds all items of the given iterable, preserving order."""
    letters = OrderedSet('abcd')
    letters.update('efgh')

    assert len(letters) == 8
    assert (letters[0], letters[7]) == ('a', 'h')
コード例 #7
0
ファイル: fixture_store.py プロジェクト: yaelmi3/slash
 def _compute_all_needed_parametrization_ids(self, fixtureobj):
     """Return an OrderedSet of all parametrization ids *fixtureobj* needs.

     Walks the fixture dependency graph iteratively (DFS via an explicit
     stack), reusing memoized per-fixture results and raising on cyclic
     dependencies.
     """
     # Stack entries: (fixture id, path from the root, ids on that path).
     stack = [(fixtureobj.info.id, [fixtureobj.info.id],
               set([fixtureobj.info.id]))]
     returned = OrderedSet()
     while stack:
         fixture_id, path, visited = stack.pop()
         # Memoized: reuse previously-computed ids for this fixture.
         if fixture_id in self._all_needed_parametrization_ids_by_fixture_id:
             returned.update(
                 self.
                 _all_needed_parametrization_ids_by_fixture_id[fixture_id])
             continue
         fixture = self._fixtures_by_id[fixture_id]
         if fixture.parametrization_ids:
             assert isinstance(fixture.parametrization_ids, OrderedSet)
             returned.update(fixture.parametrization_ids)
         if fixture.keyword_arguments:
             for needed in fixture.keyword_arguments.values():
                 if needed.is_parameter():
                     continue
                 needed_id = needed.info.id
                 # A dependency already on the current path means a cycle.
                 if needed_id in visited:
                     self._raise_cyclic_dependency_error(
                         fixtureobj, path, needed_id)
                 stack.append((needed_id, path + [needed_id],
                               visited | set([needed_id])))
     return returned
コード例 #8
0
ファイル: tsv_sink.py プロジェクト: deepakunni3/kgx
    def _order_node_columns(cols: Set) -> OrderedSet:
        """
        Arrange node columns in a defined order.

        Core node columns come first in a fixed sequence, then the
        remaining regular columns alphabetically, then internal
        (underscore-prefixed) columns alphabetically.

        Parameters
        ----------
        cols: Set
            A set with elements in any order

        Returns
        -------
        OrderedSet
            A set with elements in a defined order

        """
        leftover = cols.copy()
        ordered = OrderedSet()
        for column in ("id", "category", "name", "description", "xref",
                       "provided_by", "synonym"):
            if column in leftover:
                ordered.add(column)
                leftover.remove(column)
        # Internal columns are those starting with an underscore.
        internal = {c for c in leftover if c.startswith("_")}
        regular = {c for c in leftover if not c.startswith("_")}
        ordered.update(sorted(regular))
        ordered.update(sorted(internal))
        return ordered
コード例 #9
0
ファイル: fitresult.py プロジェクト: sleepy-owl/zfit
    def __str__(self) -> str:
        """Render the fit result as a plain-text table via ``tabulate``."""
        # Preferred leading columns; any other keys found in the per-param
        # dicts are appended after these.
        order_keys = ['value', 'hesse']
        keys = OrderedSet()
        for pdict in self.values():
            keys.update(OrderedSet(pdict))
        order_keys = OrderedSet([key for key in order_keys if key in keys])
        order_keys.update(keys)

        rows = []
        for param, pdict in self.items():
            row = [param.name]
            # Missing values render as a single space placeholder.
            row.extend(format_value(pdict.get(key, ' ')) for key in order_keys)
            # Highlight parameters sitting at their limit.
            row.append(
                color_on_bool(run(param.at_limit),
                              on_true=colored.bg('light_red'),
                              on_false=False))
            rows.append(row)

        order_keys = ['name'] + list(order_keys) + ['at limit']
        table = tabulate(rows,
                         order_keys,
                         numalign="right",
                         stralign='right',
                         colalign=('left', ))
        return table
コード例 #10
0
ファイル: ecore.py プロジェクト: moltob/pyecore
 def update(self, *others):
     """Add items and notify the owner with a single ADD_MANY event.

     NOTE(review): the ``others`` tuple is passed to ``OrderedSet.update``
     as-is, so each positional argument is added as one element rather than
     being unpacked like ``set.update(*others)`` would — confirm this
     matches caller expectations.
     """
     # Flag guards the underlying mutation — presumably suppresses
     # duplicate/re-entrant notifications; confirm against class internals.
     self._orderedset_update = True
     OrderedSet.update(self, others)
     self._owner.notify(Notification(new=others,
                                     feature=self._efeature,
                                     kind=Kind.ADD_MANY))
     # Record that this feature has now been explicitly set on the owner.
     self._owner._isset.add(self._efeature)
     self._orderedset_update = False
コード例 #11
0
    def __edges_to_set(pairs, mask_edges=()):
        """Collect the endpoint nodes of every pair that appears in mask_edges.

        NOTE(review): with the default ``mask_edges=()`` this always returns
        an empty set; if the intent was to *exclude* masked edges the
        condition should be ``not in`` — confirm against callers.
        """
        nodes = OrderedSet()

        for pair in pairs:
            # Only pairs present in mask_edges contribute their endpoints.
            if pair in mask_edges:
                nodes.update(pair)

        return nodes
コード例 #12
0
ファイル: charts.py プロジェクト: kmgowda/SBK
    def create_summary_sheet(self):
        """Create the "Summary" worksheet: title, SBK version, driver list,
        time unit, and one row per action with its storage drivers."""
        # ARGB color constants for the cell fonts below.
        BLACK = 'FF000000'
        WHITE = 'FFFFFFFF'
        RED = 'FFFF0000'
        DARKRED = 'FF800000'
        BLUE = 'FF0000FF'
        DARKBLUE = 'FF000080'
        GREEN = 'FF00FF00'
        DARKGREEN = 'FF008000'
        YELLOW = 'FFFFFF00'
        DARKYELLOW = 'FF808000'

        acts = self.get_actions_storage_map()
        sheet = self.wb.create_sheet("Summary")
        row = 7
        col = 7
        # Widen the two columns that carry the text content.
        sheet.column_dimensions[get_column_letter(col)].width = 25
        sheet.column_dimensions[get_column_letter(col + 1)].width = 50
        # Title cell.
        cell = sheet.cell(row, col + 1)
        cell.value = "SBK Charts "
        cell.font = Font(size="47", bold=True, color=DARKBLUE)
        cell.alignment = Alignment(horizontal='center')
        row += 1
        # Version cell.
        cell = sheet.cell(row, col + 1)
        cell.value = "SBK Version : " + self.version
        cell.font = Font(size="27", bold=True, color=DARKYELLOW)
        cell.alignment = Alignment(horizontal='center')
        row += 1
        # Distinct storage drivers across all actions, insertion-ordered.
        drivers = OrderedSet()
        for values in acts.values():
            drivers.update(values)
        text = "Performance Analysis of Storage Drivers :  " + ", ".join(
            drivers)
        cell = sheet.cell(row, col)
        cell.value = text
        cell.font = Font(size="27", bold=True, color=RED)
        row += 1
        # Time-unit label and value.
        cell = sheet.cell(row, col)
        cell.value = "Time Unit"
        cell.font = Font(size="18", bold=False, color=BLUE)
        cell = sheet.cell(row, col + 1)
        cell.value = self.get_time_unit(self.wb[constants.R_PREFIX + "1"])
        cell.font = Font(size="18", bold=False, color=BLACK)
        row += 1
        # One row per action: action name, then its storage drivers;
        # each row is also echoed to stdout.
        for i, key in enumerate(acts):
            cell = sheet.cell(row + i, col)
            cell.value = key
            text = key
            cell.font = Font(size="18", bold=False, color=DARKGREEN)
            cell = sheet.cell(row + i, col + 1)
            cell.value = ", ".join(acts[key])
            cell.font = Font(size="18", bold=False, color=DARKRED)
            text += " : " + cell.value
            print(text)
コード例 #13
0
    def get_all_events(self):
        """
        Get all registered events plus the event for the total progress.
        Assume each node has the same events.

        Returns
        -------
        OrderedSet<str>
        """
        # The total-progress event always comes first.
        events = OrderedSet((self.EVENT_TOTAL_PROGRESS, ))
        events.update(self.get_events())
        return events
コード例 #14
0
    def set_of_transactions(self):
        '''Get all transactions of blockchain in an `OrderedSet`.

        Returns:

        * `OrderedSet` of `Transaction`s.'''

        # Union of the transaction lists of every block in the chain.
        collected = OrderedSet()
        for block in self.chain:
            collected.update(block.list_of_transactions)
        return collected
コード例 #15
0
ファイル: bym.py プロジェクト: alexey-lysiuk/bym
def _add_prerequisites(targets):
    """Prepend the deduplicated prerequisites of *targets* (plus the
    configured base prerequisites) to the targets list in place."""
    prereqs = OrderedSet(configuration.prerequisites)

    for name in targets:
        commands = repository.package(name).commands
        # Normalize: a single command object becomes a one-element list.
        if not isinstance(commands, (tuple, list)):
            commands = [commands]
        for command in commands:
            prereqs.update(command.prerequisites())

    # Splice the prerequisites in front of the original targets.
    targets[0:0] = list(prereqs)
コード例 #16
0
ファイル: event.py プロジェクト: pronovic/apologies-server
 def message(
     self,
     message: Message,
     websockets: Optional[List[WebSocketServerProtocol]] = None,
     players: Optional[List[TrackedPlayer]] = None,
 ) -> None:
     """Enqueue a task to send a message to one or more destination websockets."""
     # Start from any explicitly-listed websockets, keeping order.
     destinations = OrderedSet(websockets) if websockets else OrderedSet()
     if players:
         # Add each player's websocket, skipping players without one.
         destinations.update(
             player.websocket for player in players if player.websocket)
     self.messages.extend(
         (message.to_json(), destination) for destination in destinations)
コード例 #17
0
ファイル: test.py プロジェクト: LuminosoInsight/ordered-set
def test_update():
    """update() returns the index of the last item and deduplicates."""
    disjoint = OrderedSet('abcd')
    assert disjoint.update('efgh') == 7
    assert len(disjoint) == 8
    assert ''.join(disjoint) == 'abcdefgh'

    overlapping = OrderedSet('abcd')
    assert overlapping.update('cdef') == 5
    assert len(overlapping) == 6
    assert ''.join(overlapping) == 'abcdef'
コード例 #18
0
ファイル: test.py プロジェクト: wimglenn/ordered-set
def test_update():
    """update() deduplicates and reports the last item's index."""
    fresh = OrderedSet('abcd')
    outcome = fresh.update('efgh')
    assert outcome == 7
    assert len(fresh) == 8
    assert ''.join(fresh) == 'abcdefgh'

    partial = OrderedSet('abcd')
    outcome = partial.update('cdef')
    assert outcome == 5
    assert len(partial) == 6
    assert ''.join(partial) == 'abcdef'
コード例 #19
0
ファイル: tcga.py プロジェクト: aiswaryasankar/maml
def _split_tcga(tcga_metadataset, counts):
    """Split *tcga_metadataset* into ``len(counts)`` metadatasets of the
    given sizes, then expand each split with reusable samples.

    Task keys are randomly partitioned with a torch-based permutation;
    splits are expanded in ascending-size order so spare samples are
    shared without double counting (via a blacklist).
    """
    all_allowed_samples = tcga_metadataset.task_ids

    # We first uniquely assign every sample to a task
    sample_to_task_assignment = _assign_samples(tcga_metadataset)

    keys = [i for i in all_allowed_samples.keys()]
    difference = set(sample_to_task_assignment.keys()).difference(set(keys))

    # Samples assigned to tasks outside the allowed keys stay unassigned.
    unassigned_samples = OrderedSet()
    for key in difference:
        unassigned_samples.update(sample_to_task_assignment[key])

    # Second we split the metadataset
    # with a torch-based random sample
    permutation = torch.randperm(len(keys)).numpy()

    metadatasets = []
    start = 0
    end = 0
    for count in counts:
        end += count
        current_keys = [keys[index] for index in permutation[start:end]]
        metadatasets.append(
            {key: sample_to_task_assignment[key]
             for key in current_keys})
        start = end

    expanded_metadatasets = [None] * len(metadatasets)
    # Process splits in ascending order of size.
    order = np.argsort([len(metadataset) for metadataset in metadatasets])

    # Finally we expand the tasks by reusing samples wherever possible in the sets
    blacklist = OrderedSet()
    for i in order:
        additional_samples = unassigned_samples.difference(blacklist)
        expanded_metadataset, used_additional_samples = _expand_sample_usage(
            metadatasets[i], all_allowed_samples, additional_samples)
        expanded_metadatasets[i] = (expanded_metadataset)
        blacklist.update(used_additional_samples)

    tcga_metadatasets = []
    # Close before deepcopy; reopen each copy afterwards if preloaded.
    tcga_metadataset.close()
    preloaded = tcga_metadataset.preloaded
    for metadataset in expanded_metadatasets:
        current_tcga_metadataset = copy.deepcopy(tcga_metadataset)
        current_tcga_metadataset.task_ids = metadataset
        if preloaded:
            current_tcga_metadataset.open()
        tcga_metadatasets.append(current_tcga_metadataset)

    return tcga_metadatasets
コード例 #20
0
class Proposed(object):
    """Holds a set of proposed actions and fluents.

    Instances hash and compare via their ``to_tuple()`` canonical form,
    so equal instances hash equally.
    """

    def __init__(self):
        self._actions = OrderedSet()
        self._fluents = OrderedSet()

    def __repr__(self):
        ret = "Proposed Actions: %s\n" % (str(
            [a for a in self._actions if a.BaseClass is ACTION]))
        ret += "Proposed Fluents: %s\n" % (str(self._fluents))
        return ret

    def __eq__(self, other):
        # BUG FIX: previously called the nonexistent self._to_tuple(),
        # raising AttributeError on any equality check. Use to_tuple(),
        # the same canonical form __hash__ uses, keeping eq/hash consistent.
        return self.to_tuple() == other.to_tuple()

    def __hash__(self):
        return hash(self.to_tuple())

    def to_tuple(self):
        """Canonical, hashable form: (action tuples, fluent tuples)."""
        return (
            tuple(a.to_tuple() for a in self.actions),
            tuple(f.to_tuple() for f in self.fluents),
        )

    @property
    def actions(self):
        return self._actions

    def add_action(self, action):
        self._actions.add(action)

    def add_actions(self, actions):
        self._actions.update(actions)

    def clear_actions(self):
        self._actions = OrderedSet()

    @property
    def fluents(self):
        return self._fluents

    def add_fluent(self, fluent):
        self._fluents.add(fluent)

    def clear_fluents(self):
        self._fluents = OrderedSet()

    def reset(self):
        """Clear both the proposed actions and the proposed fluents."""
        self.clear_actions()
        self.clear_fluents()
コード例 #21
0
    def iter_variations(self):
        """Yield every Variation produced by the needed fixtures' parameters."""
        needed_ids = OrderedSet()
        # Sort the needed fixtures by scope (descending) before collecting
        # their transitive fixture ids.
        self._needed_fixtures.sort(key=lambda x: x.info.scope, reverse=True)
        for fixture in self._needed_fixtures:
            needed_ids.update(self._store.get_all_needed_fixture_ids(fixture))

        parametrizations = [
            self._store.get_fixture_by_id(param_id) for param_id in needed_ids
        ]
        # No parametrization at all: yield a single empty variation.
        if not needed_ids:
            yield Variation(self._store, {}, {})
            return
        # Cartesian product over each parametrization's value indices.
        for value_indices in itertools.product(*(range(len(p.values))
                                                 for p in parametrizations)):
            yield self._build_variation(parametrizations, value_indices)
コード例 #22
0
ファイル: tcga.py プロジェクト: aiswaryasankar/maml
def _expand_sample_usage(meta_dataset, all_allowed_samples,
                         additional_samples):
    """Expand each task's sample list with allowed overlapping samples.

    Returns the expanded metadataset and the additional samples used.
    """
    expanded_metadataset = {}
    # Pool of every sample already in the metadataset plus the extras.
    all_samples_of_metadataset = OrderedSet()
    for key, value in meta_dataset.items():
        all_samples_of_metadataset.update(value)
    all_samples_of_metadataset.update(additional_samples)

    used_additional_samples = OrderedSet()
    for key in meta_dataset.keys():
        allowed_samples = set(all_allowed_samples[key])
        intersection = allowed_samples.intersection(all_samples_of_metadataset)
        expanded_metadataset[key] = list(intersection)
        # NOTE(review): this reassigns on every iteration, so only the last
        # key's overlap survives the loop; if the intent was to accumulate,
        # an in-place union would be needed — confirm against callers.
        used_additional_samples = additional_samples.intersection(intersection)

    return expanded_metadataset, used_additional_samples
コード例 #23
0
        def _visit_relatives(artist_id):
            """Level-by-level walk over related artists starting at *artist_id*.

            Relies on ``halt_condition`` and ``excluded_artist_ids`` from the
            enclosing scope; returns the OrderedSet of visited artist ids.
            """
            visited_artist_ids = OrderedSet([artist_id])
            artist_ids = OrderedSet([artist_id])
            depth = 0
            while not halt_condition(visited_artist_ids, depth):
                self.logger.debug("%d artists on level %d for whom to gather relatives.", len(artist_ids), depth)
                relative_ids = OrderedSet()
                for artist_id in artist_ids:
                    relative_ids.update(self.spotify_client.related_artist_ids(artist_id))
                # Drop anything already seen or explicitly excluded.
                relative_ids -= visited_artist_ids
                relative_ids -= excluded_artist_ids
                self.logger.debug("After removing relatives either excluded or already visited, %d new relatives found "
                        "on level %d.", len(relative_ids), depth)
                visited_artist_ids.update(relative_ids)

                # The next level starts from the newly discovered relatives.
                artist_ids = relative_ids
                depth += 1
            return visited_artist_ids
コード例 #24
0
    def get_dirs(self):
        """Return the parent loader's template dirs plus any configured
        tenant-specific MULTITENANT_TEMPLATE_DIRS, in reverse order."""
        dirs = OrderedSet(super(FilesystemLoader, self).get_dirs())

        if connection.tenant and not isinstance(connection.tenant, FakeTenant):
            try:
                template_dirs = settings.MULTITENANT_TEMPLATE_DIRS
            except AttributeError:
                raise ImproperlyConfigured(
                    "To use %s.%s you must define the MULTITENANT_TEMPLATE_DIRS"
                    % (__name__, FilesystemLoader.__name__))

            for template_dir in reversed(template_dirs):
                # A "%s" placeholder is filled with the tenant's domain URL.
                if "%s" in template_dir:
                    dirs.update([template_dir % (connection.tenant.domain_url, )])
                else:
                    dirs.update([template_dir])

        return list(reversed(dirs))
コード例 #25
0
    def log_metrics(self,
                    train_metrics: dict,
                    val_metrics: dict = None,
                    log_to_console: bool = False) -> None:
        """
        Sends all of the train metrics (and validation metrics, if provided) to tensorboard.

        Parameters
        ----------
        train_metrics : dict
            Metric name -> value for the training pass.
        val_metrics : dict, optional
            Metric name -> value for the validation pass.
        log_to_console : bool, optional
            If True, also pretty-print the metrics via the module logger.
        """
        # Union of metric names, training names first.
        metric_names = OrderedSet(train_metrics.keys())
        if val_metrics is not None:
            metric_names.update(val_metrics.keys())
        val_metrics = val_metrics or {}

        # For logging to the console
        if log_to_console:
            dual_message_template = "%s |  %8.3f  |  %8.3f"
            no_val_message_template = "%s |  %8.3f  |  %8s"
            no_train_message_template = "%s |  %8s  |  %8.3f"
            header_template = "%s |  %-10s"
            # Pad to the widest metric name so columns line up.
            name_length = max([len(x) for x in metric_names])
            logger.info(header_template, "Training".rjust(name_length + 13),
                        "Validation")

        for name in metric_names:
            # Log to tensorboard
            train_metric = train_metrics.get(name)
            if train_metric is not None:
                self.add_train_scalar(name, train_metric)
            val_metric = val_metrics.get(name)
            if val_metric is not None:
                self.add_validation_scalar(name, val_metric)

            # And maybe log to console
            if log_to_console and val_metric is not None and train_metric is not None:
                logger.info(dual_message_template, name.ljust(name_length),
                            train_metric, val_metric)
            elif log_to_console and val_metric is not None:
                logger.info(no_train_message_template, name.ljust(name_length),
                            "N/A", val_metric)
            elif log_to_console and train_metric is not None:
                logger.info(no_val_message_template, name.ljust(name_length),
                            train_metric, "N/A")
コード例 #26
0
ファイル: json2csv.py プロジェクト: sremy/scripts
def write_csv_file(json_array_to_convert, csv_file_path: str,
                   key_whitelist: list):
    """Flatten each item of a JSON array and write the result as CSV.

    The header is the insertion-ordered union of every flattened item's
    keys, as produced by ``flatten_item``.
    """
    list_processed_data = []
    header = OrderedSet()
    for item in json_array_to_convert:
        map_column_flatitem = {}
        prefix = ""
        flatten_item(map_column_flatitem, prefix, item, key_whitelist)
        list_processed_data.append(map_column_flatitem)
        # Grow the header with any new columns this item introduced.
        header.update(map_column_flatitem.keys())

    csv.register_dialect("my_dialect", my_dialect)
    with open(csv_file_path, 'w+') as f:  # https://stackoverflow.com/a/1170297
        #with open(csv_file_path, 'w+', newline='') as f: # prevents python to replace \n by \r\n on Windows
        writer = csv.DictWriter(f, header, dialect="my_dialect")
        writer.writeheader()
        for map_row in list_processed_data:
            writer.writerow(map_row)
            #print(map_row)

    print("[+] Completed writing CSV file with %d columns, %d lines" %
          (len(header), len(list_processed_data)))
コード例 #27
0
ファイル: fitresult.py プロジェクト: mozgit/zfit
    def __str__(self) -> str:
        """Render the fit result as a colored table via ``tafo.generate_table``."""
        # Preferred leading columns; any other keys found in the per-param
        # dicts are appended after these.
        order_keys = ['value', 'hesse']
        keys = OrderedSet()
        for pdict in self.values():
            keys.update(OrderedSet(pdict))
        order_keys = OrderedSet([key for key in order_keys if key in keys])
        order_keys.update(keys)

        rows = []
        for param, pdict in self.items():
            row = [param.name]
            # Missing values render as a single space placeholder.
            row.extend(format_value(pdict.get(key, ' ')) for key in order_keys)
            # Highlight parameters sitting at their limit.
            row.append(
                color_on_bool(run(param.at_limit),
                              on_true=colored.bg('light_red'),
                              on_false=False))
            rows.append(row)

        order_keys = ['name'] + list(order_keys) + ['at limit']

        return tafo.generate_table(rows,
                                   order_keys,
                                   grid_style=tafo.AlternatingRowGrid(
                                       colored.bg(15), colored.bg(254)))
コード例 #28
0
class LocalCovisibilityMap(LocalMapBase):
    """Local map built from the covisibility neighborhood of a reference keyframe."""

    def __init__(self, map=None):
        super().__init__(map)

    def update_keyframes(self, kf_ref):
        """Rebuild self.keyframes as kf_ref plus its non-bad covisible keyframes."""
        with self._lock:
            assert (kf_ref is not None)
            self.keyframes = OrderedSet()
            self.keyframes.add(kf_ref)
            # Keep only covisible keyframes that are not flagged as bad.
            neighbor_kfs = [
                kf for kf in kf_ref.get_covisible_keyframes() if not kf.is_bad
            ]
            self.keyframes.update(neighbor_kfs)
            return self.keyframes

    def get_best_neighbors(self,
                           kf_ref,
                           N=Parameters.kLocalMappingNumNeighborKeyFrames):
        """Return the N best covisible keyframes of kf_ref."""
        return kf_ref.get_best_covisible_keyframes(N)

    # update the local keyframes, the viewed points and the reference keyframes (that see the viewed points but are not in the local keyframes)
    def update(self, kf_ref):
        self.update_keyframes(kf_ref)
        return self.update_from_keyframes(self.keyframes)
コード例 #29
0
ファイル: pandas_transformer.py プロジェクト: vemonet/kgx
    def _order_node_columns(cols: Set) -> OrderedSet:
        """
        Arrange node columns in a defined order.

        Core node columns come first in a fixed sequence; any remaining
        columns follow in their original iteration order.

        Parameters
        ----------
        cols: Set
            A set with elements in any order

        Returns
        -------
        OrderedSet
            A set with elements in a defined order

        """
        leftover = cols.copy()
        ordered = OrderedSet()
        for column in ('id', 'name', 'category', 'provided_by'):
            if column in leftover:
                ordered.add(column)
                leftover.remove(column)
        ordered.update(leftover)
        return ordered
コード例 #30
0
ファイル: vivado.py プロジェクト: Yummot/enzi
def inc_dir_filter(files):
    """inc_dir_filter for vivado

    Deduplicate the given include directories, preserving first-seen
    order, and join them into a single space-separated string.

    Parameters
    ----------
    files : Mapping | list | None
        Either a mapping whose values are iterables of paths, or a flat
        list of paths. Falsy input yields ''.

    Raises
    ------
    RuntimeError
        If *files* is neither a Mapping nor a list.
    """
    if not files:
        return ''

    # An insertion-ordered dict doubles as an ordered set for dedup.
    # This replaces the previous map()-with-side-effects construction,
    # which relied on set(m) to force evaluation of the lazy map object.
    deduped = {}
    if isinstance(files, Mapping):
        for group in files.values():
            for item in group:
                deduped[item] = None
    elif isinstance(files, list):
        for item in files:
            deduped[item] = None
    else:
        fmt = 'unreachable files type shouldn\'t be {}'
        msg = fmt.format(files.__class__.__name__)
        logger.error(msg)
        raise RuntimeError(msg)
    return ' '.join(deduped)
コード例 #31
0
 def edge_types(self):
     """Return the OrderedSet of (src, dst, flag) edge types for this graph.

     The contents depend on the configuration flags ``formula_nodes``,
     ``arg_order``, ``equality_nodes`` and ``equality_predicate_edge``.
     The third tuple element is 1/0/None — presumably a polarity marker
     on clause edges; confirm against graph-construction code.
     """
     res = OrderedSet()
     if self.formula_nodes:
         res.add(('formula', 'clause', None))
     res.add(('clause', 'variable', None))
     res.update([
         ('clause', self.ntype_atom, 1),
         ('clause', self.ntype_atom, 0),
         (self.ntype_atom, 'predicate', None)
     ])
     res.add(('term', 'function', None))
     # With arg_order, argument nodes mediate atom/term -> child links;
     # otherwise atoms and terms connect to children directly.
     if self.arg_order:
         res.update([
             (self.ntype_atom, 'argument', None),
             ('term', 'argument', None),
             ('argument', 'argument', None),
             ('argument', 'term', None),
             ('argument', 'variable', None)
         ])
     else:
         res.update([
             (self.ntype_atom, 'term', None),
             (self.ntype_atom, 'variable', None),
             ('term', 'term', None),
             ('term', 'variable', None)
         ])
     if self.equality_nodes:
         res.update([
             ('clause', 'equality', 1),
             ('clause', 'equality', 0),
             ('equality', 'term', None),
             ('equality', 'variable', None)
         ])
         if self.equality_predicate_edge:
             res.add(('equality', 'predicate', None))
     return res
コード例 #32
0
def define_what_needs_to_be_written() -> Tuple[Set[Override], Set[OverExc]]:
    """
    Creates three sets containing the definition of what we want to write as methods in the generated class.
    :return: a tuple of two sorted sets. The first set contains Override definitions, the second one OverExc
    definitions
    """

    # init containers
    to_override = OrderedSet()
    to_override_with_exception = OrderedSet()
    to_skip = set()

    def _pair_op(name, symbol, precedence):
        # Register a binary operator together with its reflected
        # ('__r<name>__') variant. Insertion order matters: the OrderedSet
        # drives the order of the generated methods.
        to_override.add(Override(name, pair_operator=symbol,
                                 precedence_level=precedence))
        to_override.add(Override('__r' + name[2:], pair_operator=symbol,
                                 is_operator_left=False,
                                 precedence_level=precedence))

    # ** Base **
    # .__class__, .__mro__
    # .__doc__, .__name__, __module__, .__dict__
    to_skip.update({
        '__class__', '__mro__', '__doc__', '__name__', '__module__', '__dict__'
    })

    # ** Iterable **
    # .__iter__
    # Overriding COULD work but creates infinite loops when a list
    # comprehension is used in the expression [i for i in x], so we prefer
    # to raise an exception and tell users that list comprehensions are
    # forbidden.
    to_override_with_exception.add(OverExc('__iter__', unbound_method=iter))

    # ** Iterator and Generator **
    # .__next__
    to_override.add(Override('__next__', unbound_method=next))

    # ** Initializable Object **
    # .__new__, .__init__, .__del__
    to_skip.update({'__new__', '__init__', '__del__'})

    # ** Representable Object **
    # .__repr__, .__str__, .__bytes__, .__format__, .__sizeof__
    for name, method in (('__str__', str), ('__repr__', repr),
                         ('__bytes__', bytes), ('__format__', format),
                         ('__sizeof__', getsizeof)):
        to_override_with_exception.add(OverExc(name, unbound_method=method))

    # ** Comparable Objects **
    # .__lt__, .__le__, .__eq__, .__ne__, .__gt__, .__ge__
    for name, symbol in (('__lt__', '<'), ('__le__', '<='), ('__eq__', '=='),
                         ('__ne__', '!='), ('__gt__', '>'), ('__ge__', '>=')):
        to_override.add(Override(name, pair_operator=symbol,
                                 precedence_level='_PRECEDENCE_COMPARISON'))

    # ** Hashable Object **
    # .__hash__
    to_override_with_exception.add(OverExc('__hash__', unbound_method=hash))

    # ** Truth-testable Object **
    # .__bool__
    to_override_with_exception.add(OverExc('__bool__', unbound_method=bool))

    # ** Object = Field container **
    # .__getattribute__ (to avoid)
    # .__getattr__, .__setattr__, .__delattr__
    # .__dir__
    # .__slots__
    to_skip.update({
        '__getattribute__', '__setattr__', '__delattr__', '__dir__',
        '__slots__'
    })
    to_override.add(Override('__getattr__', unbound_method=getattr))

    # ** Object Descriptors **
    # .__get__, .__set__, .__delete__, .__set_name__
    to_skip.update({'__get__', '__set__', '__delete__', '__set_name__'})

    # ** Callable **
    # .__call__
    to_override.add(Override('__call__'))

    # ** Class **
    # IMPOSSIBLE TO OVERRIDE: __instancecheck__/__subclasscheck__ are CLASS
    # methods carried by the SECOND argument, not the first —
    # isinstance(x, int) calls __instancecheck__ on int, not on x!
    to_skip.update({'__instancecheck__', '__subclasscheck__'})
    to_skip.update(
        {'__init_subclass__', '__subclasshook__', '__abstractmethods__'})

    # ** Container **
    # .__contains__
    to_skip.update({'__contains__'})

    # ** Sized Container **
    # .__len__, .__length_hint__
    to_override_with_exception.add(OverExc('__len__', unbound_method=len))

    # ** Iterable Container: see Iterable **
    # ** Reversible Container **
    # .__reversed__
    to_override.add(Override('__reversed__', unbound_method=reversed))

    # ** Subscriptable / Mapping Container **
    # .__getitem__, .__missing__, .__setitem__, .__delitem__
    to_override.add(Override('__getitem__'))
    to_override.add(Override('__missing__'))
    to_skip.update({'__setitem__', '__delitem__'})

    # ** Numeric types **
    # binary arithmetic/bitshift operators with their reflected variants
    _pair_op('__add__', '+', '_PRECEDENCE_ADD_SUB')
    _pair_op('__sub__', '-', '_PRECEDENCE_ADD_SUB')
    _pair_op('__mul__', '*', '_PRECEDENCE_MUL_DIV_ETC')
    _pair_op('__truediv__', '/', '_PRECEDENCE_MUL_DIV_ETC')
    _pair_op('__mod__', '%', '_PRECEDENCE_MUL_DIV_ETC')
    to_override.add(Override('__divmod__'))
    to_override.add(Override('__rdivmod__'))
    _pair_op('__pow__', '**', '_PRECEDENCE_EXPONENTIATION')
    to_override.add(
        Override('__matmul__',
                 pair_operator='@',
                 precedence_level='_PRECEDENCE_MUL_DIV_ETC'))
    # Override('__rmatmul__', operator='@', is_operator_left=False),
    _pair_op('__floordiv__', '//', '_PRECEDENCE_MUL_DIV_ETC')
    _pair_op('__lshift__', '<<', '_PRECEDENCE_SHIFTS')
    _pair_op('__rshift__', '>>', '_PRECEDENCE_SHIFTS')
    # NOTE: two further plain '__rshift__' additions existed here; they were
    # no-ops on the OrderedSet (duplicates) and have been removed.

    # unary operators
    to_override.add(
        Override('__neg__',
                 uni_operator='-',
                 precedence_level='_PRECEDENCE_POS_NEG_BITWISE_NOT'))
    to_override.add(
        Override('__pos__',
                 uni_operator='+',
                 precedence_level='_PRECEDENCE_POS_NEG_BITWISE_NOT'))
    to_override.add(Override('__abs__', unbound_method=abs))
    to_override.add(
        Override('__invert__',
                 uni_operator='~',
                 precedence_level='_PRECEDENCE_POS_NEG_BITWISE_NOT'))
    to_override.add(Override('__round__', unbound_method=round))

    # ** Boolean types **
    # .__and__, .__xor__, .__or__, __rand__, __rxor__, __ror__
    to_skip.update(
        {'__and__', '__xor__', '__or__', '__rand__', '__rxor__', '__ror__'})

    # ** Type conversion **
    # __int__, __long__, __float__, __complex__, __oct__, __hex__, __index__, __trunc__, __coerce__
    to_override.add(Override('__trunc__'))
    to_override.add(Override('__coerce__'))
    to_skip.update({'__index__'})
    for name, method in (('__int__', int), ('__float__', float),
                         ('__complex__', complex), ('__oct__', oct),
                         ('__hex__', hex)):
        to_override_with_exception.add(OverExc(name, unbound_method=method))
    # OverExc('__long__', unbound_method=long) is Python-2 only
    # ('Index', '__index__', None)

    # ** Pickle **
    # __reduce__, __reduce_ex__
    to_skip.update({'__reduce__', '__reduce_ex__'})

    # make sure that the ones noted 'to skip' are not in the other sets to return
    to_override_2 = OrderedSet()
    for overriden in to_override:
        if overriden not in to_skip and overriden not in to_override_with_exception:
            assert type(overriden) is Override
            to_override_2.add(overriden)

    to_override_with_exception_2 = OrderedSet()
    for overriden_with_e in to_override_with_exception:
        if overriden_with_e not in to_skip:
            assert type(overriden_with_e) is OverExc
            to_override_with_exception_2.add(overriden_with_e)

    return to_override_2, to_override_with_exception_2
コード例 #33
0
ファイル: test.py プロジェクト: LuminosoInsight/ordered-set
def test_update_value_error():
    """OrderedSet.update must reject a non-iterable argument."""
    letters = OrderedSet('ab')
    with pytest.raises(ValueError):
        # noinspection PyTypeChecker
        letters.update(3)
コード例 #34
0
from ordered_set import OrderedSet
from pathlib import Path
#
# This is a template for the Case-level override configuration file.
# To use it, the file name must match the case ID (i.e SR000005.py for case ID SR000005)
#
# Only the names actually assigned below take effect; the commented-out
# examples document the other supported override knobs.

# Example of running minimal set of plugins for mactime
limited_plugins_group = ['shellbags', 'mftparser', 'timeliner', 'mactime']

# Only run custom list of plugins
# (OrderedSet preserves the listed execution order while de-duplicating.)
active_plugins = OrderedSet()
active_plugins.update(limited_plugins_group)

# Run default + additional set of plugins
#additional_plugins = set()
#additional_plugins.update(limited_plugins_group)

# Exclude specific plugins
#exclude_plugins = set(['dlldump'])

# Per-plugin configuration overrides, keyed by plugin name.
#plugins_config = dict({
#    'dlldump': {'extra_flags': "--dump-dir=%s" % Path(r"/var/tmp/memdumps/").as_posix(),
#                'splunk_output': False
#                },
#    'dumpregistry': {'splunk_output': False}
#})
class EchonestExtension(BaseExtension):
    EXTENSION_NAME = 'echonest-extension'

    def __init__(self, app):
        """Initialize per-extension state; no UI is touched until setup()."""
        BaseExtension.__init__(self, app)
        self.__asset_menu_item = None
        self.__analysis_handler_id = 0
        self.__audio_previewer = None       # AudioPreviewer for the open dialog
        self.__current_builder = None       # Gtk.Builder of the open clip dialog
        self.__clap_mixer = ClapMixer()
        self.__clap_mixer_handlers = []     # pipeline signal handler ids to disconnect
        self.__current_track = None         # last analysed echonest track

        # Drag state over the waveform, stored as fractions of widget width.
        # BUGFIX: the original initialized '__button2_motion_end', but every
        # other method reads/writes '__button1_motion_end'.
        self.__button1_motion_start = None
        self.__button1_motion_end = None

        self.__selected_beats = None        # OrderedSet of selected beat starts

    def setup(self):
        """Hook the extension into the media library and timeline context menus."""
        self.app.gui.medialibrary.connect('populating-asset-menu',
                self.__add_asset_menu_item_cb)
        self.app.gui.timeline_ui.timeline.connect('populating-clip-menu',
                self.__add_clip_menu_item_cb)

    def __load_from_cache(self, filename):
        """Return the cached pickled analysis for *filename*, or None.

        Cache entries live in the XDG cache dir and are keyed by the hash
        of the media file's content.
        """
        cache_dir = get_dir(os.path.join(xdg_cache_home(), "echonest"))
        cache_path = os.path.join(cache_dir, hash_file(filename) + '.analysis')
        try:
            with open(cache_path, 'rb') as cache_file:
                return pickle.load(cache_file)
        except IOError:
            # Missing or unreadable cache entry: treat as a cache miss.
            return None

    def __save_to_cache(self, filename, track):
        """Pickle *track* into the echonest cache, keyed by file content hash."""
        cache_dir = get_dir(os.path.join(xdg_cache_home(), "echonest"))
        cache_path = os.path.join(cache_dir, hash_file(filename) + '.analysis')
        with open(cache_path, 'wb') as cache_file:
            pickle.dump(track, cache_file)

    def analysis_worker(self, filename, callback, user_data):
        """Worker-thread entry: load a cached analysis or compute and cache it.

        When *callback* is provided it is invoked as callback(track, *user_data).
        """
        track = self.__load_from_cache(filename)
        if not track:
            # Cache miss: query echonest and remember the result.
            track = echotrack.track_from_filename(filename)
            track.get_analysis()
            self.__save_to_cache(filename, track)
        if callback:
            callback(track, *user_data)

    def __analyse_track(self, filename, callback, user_data):
        """Run analysis_worker on a background daemon thread."""
        worker = threading.Thread(
            target=self.analysis_worker,
            args=(filename, callback, user_data),
            daemon=True)
        worker.start()

    def __add_clip_menu_item_cb(self, timeline, clip, menu):
        """Append an "Echonest dialog" entry to the clip's context menu."""
        menu_item = Gtk.MenuItem.new_with_label("Echonest dialog")
        menu_item.connect('activate',
                self.__clip_dialog_cb, clip)
        menu.append(menu_item)

    def __fill_metadata_list(self, track):
        """Populate the dialog's metadata listbox from the track's attributes.

        Blacklisted attributes are skipped; list-typed attributes are shown
        as a count (prepended), scalar ones as "name : value" (appended).
        """
        listbox = self.__current_builder.get_object('metadata-list')
        for name, value in sorted(track.__dict__.items()):
            if name in METADATA_BLACKLIST:
                continue

            is_collection = name in LIST_TYPED_METADATA
            if is_collection:
                text = "Number of %s : %d" % (name, len(value))
            else:
                text = "%s : %s" % (name, str(value))

            label = Gtk.Label.new(text)
            label.set_halign(Gtk.Align.START)

            if is_collection:
                listbox.prepend(label)
            else:
                listbox.insert(label, -1)

        listbox.show_all()

    def __prepare_beat_matcher(self, track, asset, filename):
        """Wire up the waveform preview, clap mixer and initial beat selection."""
        darea = self.__current_builder.get_object('waveform_area')
        self.__audio_previewer = AudioPreviewer(track, darea, filename)
        darea.get_style_context().add_class("AudioUriSource")

        # Enable the matcher controls now that an analysis is available.
        for id_ in ('range-combo', 'select-type-combo', 'distribution-combo',
                'step-spinner'):
            self.__current_builder.get_object(id_).set_sensitive(True)

        self.__clap_mixer.set_asset(asset)
        self.__clap_mixer_handlers.append(
                self.__clap_mixer.pipeline.connect("state-change",
                self.__mixer_state_changed_cb))

        # Position updates every 50 (presumably milliseconds — confirm against
        # the pipeline API) drive the waveform playhead.
        self.__clap_mixer.pipeline.activatePositionListener(50)
        self.__clap_mixer_handlers.append(self.__clap_mixer.pipeline.connect("position",
                self.__mixer_position_cb, track))

        # Pre-select every step-th beat 'start' value as the initial selection.
        step = int(self.__current_builder.get_object('step-spinner').get_value())
        self.__selected_beats = OrderedSet([b['start'] for b in track.beats[0::step]])

        self.__compute_markers()

    def __display_track_analysis(self, track, builder, asset, filename):
        """Show a finished analysis in the dialog it was requested for.

        Bails out if *builder* is stale, i.e. the dialog was closed (or a
        new one opened) while the worker thread was still analysing.
        """
        if builder != self.__current_builder:
            return

        self.__current_track = track
        self.__fill_metadata_list(track)
        self.__prepare_beat_matcher(track, asset, filename)

    def __compute_markers(self):
        """Recompute the selected beat set from the dialog controls and push
        the resulting clap positions and waveform markers to the UI.
        """
        builder = self.__current_builder
        track = self.__current_track

        range_ = builder.get_object('range-combo').get_active_id()
        selection_type = builder.get_object('select-type-combo').get_active_id()
        step = int(builder.get_object('step-spinner').get_value())

        # Candidate beats: the whole track, the selected waveform section
        # (expressed as fractions of the beat count), or nothing.
        if range_ == 'full':
            all_beats = [beat['start'] for beat in track.beats]
        elif self.__audio_previewer.selected_section:
            section = self.__audio_previewer.selected_section
            nb_beats = len(track.beats)
            start = int(section[0] * nb_beats)
            end = int(section[1] * nb_beats)
            all_beats = [beat['start'] for beat in track.beats[start:end]]
        else:
            all_beats = []

        selected_beats = all_beats[0::step]

        if selection_type == 'exactly':
            # Replace the candidates' contribution with the fresh selection.
            self.__selected_beats -= all_beats
            self.__selected_beats.update(selected_beats)
        elif selection_type == 'add':
            self.__selected_beats.update(selected_beats)
        else:
            self.__selected_beats -= selected_beats

        # Markers are duration ratios for drawing; claps are in Gst time.
        markers = [beat / track.duration for beat in self.__selected_beats]
        claps = [beat * Gst.SECOND for beat in self.__selected_beats]

        self.__clap_mixer.set_positions(claps)
        self.__audio_previewer.set_markers(markers)
        self.__audio_previewer.darea.queue_draw()

    def __select_waveform_section(self):
        """Clamp the current drag endpoints to [0, 1] and apply the selection."""
        first = self.__button1_motion_start
        second = self.__button1_motion_end
        startpos = max(0.0, min(first, second))
        endpos = min(1.0, max(first, second))
        self.__audio_previewer.set_selected_section(startpos, endpos)
        self.__audio_previewer.darea.queue_draw()

    def __add_markers_to_timeline(self, clip):
        """Add the selected beats as timeline snapping points.

        Beats (in the clip source's time) are shifted to timeline time and
        only those inside the clip's in-point/duration window are kept.
        """
        bTimeline = self.app.gui.timeline_ui.bTimeline
        ip = clip.get_inpoint()
        end = clip.get_duration() + ip
        start = clip.get_start()
        markers = [b * Gst.SECOND + start - ip for b in self.__selected_beats if
                ip < b * Gst.SECOND < end]
        bTimeline.add_snapping_points(markers)
        self.app.gui.timeline_ui.ruler.queue_draw()

    def _match_spin_changed_cb(self, spinner):
        """Keep the beat label's grammar in sync with the step spinner."""
        step = int(self.__current_builder.get_object('step-spinner').get_value())
        label = self.__current_builder.get_object('beat_label')
        label.set_text("beat" if step == 1 else "beats")

    def _match_select_clicked_cb(self, unused_widget):
        """'Select' button: re-evaluate the beat selection controls."""
        self.__compute_markers()

    def _back_clicked_cb(self, unused_widget):
        """'Back' button: seek the clap mixer to the start."""
        self.__clap_mixer.pipeline.simple_seek(0)

    def _end_clicked_cb(self, unused_widget):
        """'End' button: intentionally a no-op for now."""
        #FIXME: do we even want that ?
        pass

    def _play_pause_clicked_cb(self, button):
        """Toggle the clap mixer between playing and paused."""
        pipeline = self.__clap_mixer.pipeline
        if pipeline.getState() == Gst.State.PLAYING:
            pipeline.pause()
        else:
            pipeline.play()

    def _waveform_area_motion_notify_cb(self, darea, event):
        """Track a drag over the waveform, updating the selected section.

        Endpoints are stored as fractions (0..1) of the widget width.
        """
        width = darea.get_allocation().width
        if self.__button1_motion_start is None:
            # First motion event of this drag: remember where it started.
            self.__button1_motion_start = event.x / width

        self.__button1_motion_end = event.x / width
        self.__select_waveform_section()


    def _waveform_area_button_release_cb(self, darea, event):
        """On release: either seek (simple click) or finalize a drag selection."""
        position = event.x / darea.get_allocation().width

        if self.__button1_motion_start is None:
            # No drag happened: treat as a click and seek there.
            nsposition = self.__current_track.duration * position * Gst.SECOND
            self.__clap_mixer.pipeline.simple_seek(int(nsposition))
        else:
            self.__select_waveform_section()

        # Reset drag state for the next gesture.
        self.__button1_motion_start = None
        self.__button1_motion_end = None

    def __clip_dialog_cb(self, widget, clip):
        """Open the per-clip echonest dialog, run the analysis, and on close
        transfer the selected beats to the timeline as snapping points.
        """
        clip = clip.bClip
        asset = clip.get_asset()
        filename = GLib.filename_from_uri(clip.props.uri)[0]

        self.__current_builder = Gtk.Builder()
        self.__current_builder.add_from_file(os.path.join(here, 'clip-dialog.ui'))
        self.__current_builder.connect_signals(self)
        self.__current_builder.get_object('step-spinner').set_range(1, 100)
        dialog = self.__current_builder.get_object('clip-dialog')
        dialog.set_transient_for(self.app.gui)

        # Analysis runs in a worker thread; the callback fills the dialog in.
        self.__analyse_track(filename, self.__display_track_analysis,
                (self.__current_builder, asset, filename,))

        # Block until the dialog is dismissed; the response itself is unused.
        dialog.run()
        dialog.destroy()

        self.__add_markers_to_timeline(clip)

        self.__clap_mixer.reset()

        # Disconnect pipeline handlers so the mixer cannot call back into a
        # dialog that no longer exists.
        for handler_id in self.__clap_mixer_handlers:
            GObject.signal_handler_disconnect(self.__clap_mixer.pipeline,
                    handler_id)

        self.__clap_mixer_handlers = []
        self.__current_builder = None
        self.__selected_beats = None

    def __add_asset_menu_item_cb(self, medialibrary, model_row, menu):
        """Append a "Run echonest analysis" entry to the asset context menu."""
        menu_item = Gtk.MenuItem.new_with_label("Run echonest analysis")
        menu_item.connect('activate',
                self.__run_analysis_clicked_cb, model_row[COL_URI])
        menu.append(menu_item)

    def __run_analysis_clicked_cb(self, widget, asset_uri):
        """Menu handler: analyse the asset in the background (no callback)."""
        self.__analyse_track(GLib.filename_from_uri(asset_uri)[0], None, None)

    def __mixer_state_changed_cb(self, unused_pipeline, new, prev):
        """Swap the play/pause icon to reflect the mixer's new state."""
        if not self.__current_builder:
            # The dialog is gone; nothing to update.
            return

        icon_names = {
            Gst.State.PLAYING: 'media-playback-pause',
            Gst.State.PAUSED: 'media-playback-start',
        }
        icon_name = icon_names.get(new)
        if icon_name is not None:
            image = self.__current_builder.get_object('play-pause-image')
            image.set_from_icon_name(icon_name, Gtk.IconSize.BUTTON)

    def __mixer_position_cb(self, unused_pipeline, position, track):
        """Move the waveform playhead to match the mixer's playback position."""
        if self.__audio_previewer:
            # position is in Gst time; the previewer wants a 0..1 ratio.
            position_ratio = (position / Gst.SECOND) / track.duration
            self.__audio_previewer.position = position_ratio
            self.__audio_previewer.darea.queue_draw()