Example 1
    def __init__(self, mentions, extract_gold=False, pfts=None, mpairs=None):
        list.__init__(self, mentions)

        # current mention index, pair-feature map {mention -> {antecedent mention -> pair features}}, mention-pair map
        self.cmid, self.pfts, self.mpairs = 0, pfts, mpairs
        # all gold clusters, mention to gold clusters map, all auto clusters, mention to auto clusters map
        self.gCs, self.m2_gCs, self.aCs, self.m2_aCs = None, None, set(), {}

        # mentions which have been visited already
        self.amset = set()

        self.ambiguous_labels = ["#other#", "#general#"]

        if extract_gold:
            labels = set(flatten([m.gold_refs for m in mentions]))
            # label to gold clusters map
            self.m_l2c = dict([(l, PluralCluster()) for l in labels])

            for m in mentions:
                for gref in m.gold_refs:
                    if gref not in self.ambiguous_labels:
                        self.m_l2c[gref].append(m)

            self.m2_gCs = {
                m: [
                    self.m_l2c[gref] if gref not in self.ambiguous_labels else
                    PluralCluster([m]) for gref in m.gold_refs
                ]
                for m in mentions
            }

            self.gCs = list(set(flatten(list(self.m2_gCs.values()))))

        self.reset()
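The snippet above assumes two helpers it never defines. A minimal sketch of stand-ins that make it runnable (assumptions, not the project's real definitions): note that a list subclass inherits __hash__ = None, so the cluster must restore identity hashing for the `list(set(flatten(...)))` call to work.

from pydash import flatten  # flattens one level of nesting

class PluralCluster(list):
    # list subclasses are unhashable by default; hash by identity so
    # clusters can be deduplicated with set() as done above
    __hash__ = object.__hash__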
Example 2
def omit(obj, *properties):
    """The opposite of :func:`pick`. This method creates an object composed of
    the property paths of `obj` that are not omitted.

    Args:
        obj (mixed): Object to process.
        *properties (str): Property values to omit.

    Returns:
        dict: Results of omitting properties.

    Example:

        >>> omit({'a': 1, 'b': 2, 'c': 3}, 'b', 'c') == {'a': 1}
        True
        >>> omit({'a': 1, 'b': 2, 'c': 3 }, ['a', 'c']) == {'b': 2}
        True
        >>> omit([1, 2, 3, 4], 0, 3) == {1: 2, 2: 3}
        True
        >>> omit({'a': {'b': {'c': 'd'}}}, 'a.b.c') == {'a': {'b': {}}}
        True

    .. versionadded:: 1.0.0

    .. versionchanged:: 4.0.0
        Moved iteratee argument to :func:`omit_by`.

    .. versionchanged:: 4.2.0
        Support deep paths.
    """
    return omit_by(obj, pyd.flatten(properties))
Example 4
    def apply_entity_classes(
            self, list_of_entities: List[List[Dict[str, Any]]]
    ) -> List[BaseEntity]:
        shaped_entities = []
        for alternative_index, entities in enumerate(list_of_entities):
            shaped_entities.append(self._reshape(entities, alternative_index))
        return py_.flatten(shaped_entities)
Example 5
def deep_scrape(starting_urls, depth=-1, url_patterns=None):
    if not isinstance(starting_urls, list):
        starting_urls = [starting_urls]

    if url_patterns is not None and not isinstance(url_patterns, list):
        url_patterns = [url_patterns]

    responses = []

    urls_to_parse = starting_urls

    while len(urls_to_parse) > 0 and depth != 0:
        curr_responses = http_multiget(urls_to_parse)
        responses += curr_responses
        parsed_urls = [url for (url, _) in responses]
        urls_to_parse = [
            parse_response_for_urls(response, url_patterns)
            for (_, response) in responses
        ]
        urls_to_parse = pydash.flatten(urls_to_parse)
        urls_to_parse = [
            url for url in urls_to_parse if url not in parsed_urls
        ]
        depth -= 1

    return responses
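A hypothetical call matching the signature above: crawl at most two levels from the start page, only queueing links that match the given pattern (the URL and pattern are illustrative, not from the source).

responses = deep_scrape('https://example.com/downloads/', depth=2,
                        url_patterns=[r'/v\d+\.\d+/'])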
Example 6
File: 037.py Project: JamesWR/euler
def main():
    primes = util.prime_under_list(1000000)
    t_primes = [[2, 3, 5, 7]]
    l_t_primes = [[2, 3, 5, 7]]
    r_t_primes = [[2, 3, 5, 7]]
    i = 0
    while primes[i] < 10:
        i += 1
    size = 2
    while len(t_primes[size - 2]) > 0 or len(t_primes[size - 3]) > 0:
        t_primes.append([])
        r_t_primes.append([])
        l_t_primes.append([])
        while i < len(primes) and primes[i] < (10 ** (size)):
            first_n = primes[i] % (10 ** (size - 1))
            last_n = math.floor(primes[i] / 10)
            is_right_t = first_n in r_t_primes[size - 2]
            is_left_t = last_n in l_t_primes[size - 2]
            if is_right_t and is_left_t:
                t_primes[size - 1].append(primes[i])
                r_t_primes[size - 1].append(primes[i])
                l_t_primes[size - 1].append(primes[i])
            elif is_right_t:
                r_t_primes[size - 1].append(primes[i])
            elif is_left_t:
                l_t_primes[size - 1].append(primes[i])
            i += 1
        size += 1
    print(sum(_.flatten(t_primes[1:])))
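The sieve above builds truncatable primes size by size. A direct restatement of the property being computed (Project Euler 37), with is_prime as an assumed helper:

def is_truncatable(n, is_prime):
    # a prime is truncatable when every left truncation and every right
    # truncation is also prime, e.g. 3797 -> 797, 97, 7 and 379, 37, 3
    if n < 10:
        return False  # 2, 3, 5, 7 are excluded by the problem statement
    s = str(n)
    return (all(is_prime(int(s[i:])) for i in range(len(s))) and
            all(is_prime(int(s[:i + 1])) for i in range(len(s))))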
Example 7
def flat_links(multiple_links: Tuple[dict, ...]) -> Tuple[str, ...]:
    """Flatten the 'links' lists of the given mappings into one tuple."""
    return tuple(
        flatten([links_data['links'] for links_data in multiple_links])
    )
Example 8
    def __init__(self, func, *indexes):
        self.func = func

        # Index `indexes` by the index value so we can do a lookup mapping by
        # walking the function arguments.
        self.indexes = dict(
            (src_index, dest_index)
            for dest_index, src_index in enumerate(pyd.flatten(indexes)))
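A concrete illustration of the mapping the comprehension above produces: with indexes=(2, 0, 1), the argument at source position 2 should land at destination position 0, and so on.

import pydash as pyd

lookup = dict(
    (src_index, dest_index)
    for dest_index, src_index in enumerate(pyd.flatten([(2, 0, 1)])))
assert lookup == {2: 0, 0: 1, 1: 2}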
Example 10
def list_of_overalps(
        seqs1: Tuple[LineToVecEncodings, ...],
        seqs2: Tuple[LineToVecEncodings, ...]) -> List[LineToVecEncodings]:
    seqs1_backwards = tuple(seq[::-1] for seq in seqs1)
    seqs2_backwards = tuple(seq[::-1] for seq in seqs2)
    return pydash.flatten([
        *compute_score_for_all(seqs1, seqs2),
        *compute_score_for_all(seqs1_backwards, seqs2_backwards),
    ])
Example 11
def augment_data(train_filename, new_train_filename):
	products_data_map, products_name_map = get_product_data()
	data = load_data(train_filename)

	generated_data = flatten([generate_data_from_product(sku, product['name']) for sku, product in products_data_map.items()])
	generated_data_df = pd.DataFrame(generated_data, columns=['user', 'sku', 'category', 'query', 'click_time', 'query_time'])
	
	new_train = pd.concat([data, generated_data_df]).sample(frac=1).reset_index(drop=True)

	save_data(new_train, new_train_filename)
Example 12
    def __init__(self, mentions, extract_gold=False, pfts=None, mpairs=None):
        list.__init__(self, mentions)

        self.cmid, self.pfts, self.mpairs = 0, pfts, mpairs
        self.gCs, self.m2_gC, self.aCs, self.m2_aC = None, None, [], {}

        self.ambiguous_labels = ["#other#", "#general#"]

        if extract_gold:
            # labels = set([m.gold_ref for m in mentions])
            labels = set(flatten([m.all_gold_refs for m in mentions]))
            # label to gold cluster for singular case
            m_l2c = dict([(l, SingEvalMentionCluster()) for l in labels])
            # label to gold cluster for all mentions
            m_l2cs = dict([(l, SingEvalMentionCluster()) for l in labels])

            for m in mentions:
                if m.gold_ref not in self.ambiguous_labels:
                    m_l2c[m.gold_ref].append(m)

                for gref in m.all_gold_refs:
                    if gref not in self.ambiguous_labels:
                        m_l2cs[gref].append(m)

            self.m2_gC = {
                m: m_l2c[m.gold_ref] if m.gold_ref not in self.ambiguous_labels
                else SingEvalMentionCluster([m])
                for m in mentions
            }
            # self.gCs = list(set(self.m2_gC.values()))

            self.m2_gCs = {
                m: [
                    m_l2cs[ref] if ref not in self.ambiguous_labels else
                    SingEvalMentionCluster([m]) for ref in m.all_gold_refs
                ]
                for m in mentions
            }
            self.gCs = list(set(flatten(list(self.m2_gCs.values()))))
            # print([[m.gold_ref for m in gc] for gc in self.gCs])

        self.reset()
Example 13
    def list_version_files(self, version, return_unmatched_urls=False):
        if not isinstance(version, list):
            versions = [version]
        else:
            versions = version

        urls_to_parse = []

        for version in versions:
            version_url = urlparse.urljoin(self.starting_url,
                                           'v' + version + '/')
            urls_to_parse.append(version_url)

        responses = http_multiget(urls_to_parse)

        parsed_urls = pydash.flatten(
            [parse_response_for_urls(response) for (_, response) in responses])

        version_files = {}
        unmatched_urls = []

        for parsed_url in parsed_urls:

            pattern_match = self.pattern_tree.search(parsed_url)

            if pattern_match is not None:
                match, path = pattern_match
                file_type = path[0]

                if file_type == 'binaries':
                    url_version = match.group(1)
                    filename = match.group(2)
                    system = match.group(3)
                    extension = match.group(4)
                    pydash.set_(version_files,
                                [url_version, file_type, system, extension], {
                                    'filename': filename,
                                    'url': parsed_url
                                })
                else:
                    url_version = match.group(1)
                    filename = match.group(2)
                    extension = match.group(3)
                    pydash.set_(version_files,
                                [url_version, file_type, extension], {
                                    'filename': filename,
                                    'url': parsed_url
                                })

            else:
                unmatched_urls.append(parsed_url)
        if return_unmatched_urls:
            version_files['unmatched_urls'] = unmatched_urls
        return version_files
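For reference, the shape of the nested dict that the pydash.set_ calls above build up, with illustrative values (the 'source' file type and all filenames are hypothetical):

version_files = {
    '1.2.0': {
        'binaries': {
            'linux': {'tar.gz': {'filename': 'app-1.2.0-linux.tar.gz',
                                 'url': 'https://.../app-1.2.0-linux.tar.gz'}},
        },
        'source': {'zip': {'filename': 'app-1.2.0.zip',
                           'url': 'https://.../app-1.2.0.zip'}},
    },
}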
Example 14
    def search(self, *criterion, **kargs):
        """Return search query object.

        Args:
            *criterion (sqlaexpr, optional): SQLA expression to filter against.

        Keyword Args:
            per_page (int, optional): Number of results to return per page.
                Defaults to ``None`` (i.e. no limit).
            page (int, optional): Which page offset of results to return.
                Defaults to ``1``.
            order_by (sqlaexpr, optional): Order by expression. Defaults to
                ``None``.

        Returns:
            Query: New :class:`Query` instance with criteria and parameters
                applied.
        """
        order_by = kargs.get('order_by')
        page = kargs.get('page')
        per_page = kargs.get('per_page')

        model_class = self.model_class

        if order_by is None and model_class:
            order_by = core.mapper_primary_key(self.model_class)

        query = self

        for criteria in pyd.flatten(criterion):
            # If we have keyword (dict) criteria, we want to apply it to the
            # base model (if present) instead of the last joined model.
            if isinstance(criteria, dict) and model_class:
                criteria = [
                    getattr(model_class, key) == val
                    for key, val in iteritems(criteria)
                ]

            if isinstance(criteria, dict):
                query = query.filter_by(**criteria)
            else:
                if not isinstance(criteria, list):
                    criteria = [criteria]
                query = query.filter(*criteria)

        if order_by is not None:
            if not isinstance(order_by, (list, tuple)):
                order_by = [order_by]
            query = query.order_by(*order_by)

        if per_page or page:
            query = query.paginate((per_page, page))

        return query
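Hypothetical usage of search() above, assuming a SQLAlchemy User model bound to this query class (all names here are illustrative, not from the source):

query = db.query(User).search(
    {'active': True},    # dict criteria applied to the base model
    User.age >= 18,      # plain SQLAlchemy expression
    order_by=User.name,
    per_page=25,
    page=2,
)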
Example 15
def main():
    logging.info('Starting processing of the data JSON into SQL')

    logging.info('First checking whether the data JSON exists')

    if not os.path.exists(constants.JSON_OUTPUT_PATH):
        logging.info('File does not exist')
        return None

    with open(constants.JSON_OUTPUT_PATH) as _json:
        elections = json.load(_json)

        elections_df = flatten(
            [get_election_data(election=election) for election in elections])

        states_df = flatten(
            [get_states_data(election=election) for election in elections])

        coligations_df = flatten_deep(
            [get_coligacoes_data(election=election) for election in elections])

        parties_df = flatten_deep(
            [get_parties_data(election=election) for election in elections])

        candidates_df = flatten_deep(
            [get_candidates_data(election=election) for election in elections])

        logging.info('Saving election data')
        save_data(df=elections_df, table='tb_elections')

        logging.info('Saving state data')
        save_data(df=states_df, table='tb_states')

        logging.info('Saving coalition data')
        save_data(df=coligations_df, table='tb_coligations')

        logging.info('Saving party data')
        save_data(df=parties_df, table='tb_parties')

        logging.info('Saving candidate data')
        save_data(df=candidates_df, table='tb_delegates')
Example 16
    def log_tensorboard(self):
        '''
        Log summary and useful info to TensorBoard.
        NOTE this logging is comprehensive and memory-intensive, hence it is used in dev mode only
        '''
        # initialize TensorBoard writer
        if not hasattr(self, 'tb_writer'):
            log_prepath = self.spec['meta']['log_prepath']
            self.tb_writer = SummaryWriter(
                os.path.dirname(log_prepath),
                filename_suffix=os.path.basename(log_prepath))
            self.tb_actions = []  # store actions for tensorboard
            logger.info(
                f'Using TensorBoard logging for dev mode. Run `tensorboard --logdir={log_prepath}` to start TensorBoard.'
            )

        trial_index = self.spec['meta']['trial']
        session_index = self.spec['meta']['session']
        if session_index != 0:  # log only session 0
            return
        idx_suffix = f'trial{trial_index}_session{session_index}'
        frame = self.env.clock.frame
        # add main graph
        if False and self.env.clock.frame == 0 and hasattr(
                self.agent.algorithm, 'net'):
            # can only log 1 net to tb now, and 8 is a good common length for stacked and rnn inputs
            net = self.agent.algorithm.net
            self.tb_writer.add_graph(net,
                                     torch.rand(ps.flatten([8, net.in_dim])))
        # add summary variables
        last_row = self.train_df.iloc[-1]
        for k, v in last_row.items():
            self.tb_writer.add_scalar(f'{k}/{idx_suffix}', v, frame)
        # add network parameters
        for net_name in self.agent.algorithm.net_names:
            if net_name.startswith('global_') or net_name.startswith(
                    'target_'):
                continue
            net = getattr(self.agent.algorithm, net_name)
            for name, params in net.named_parameters():
                self.tb_writer.add_histogram(f'{net_name}.{name}/{idx_suffix}',
                                             params, frame)
        # add action histogram and flush
        if not ps.is_empty(self.tb_actions):
            actions = np.array(self.tb_actions)
            if len(actions.shape) == 1:
                self.tb_writer.add_histogram(f'action/{idx_suffix}', actions,
                                             frame)
            else:  # multi-action
                for idx, subactions in enumerate(actions.T):
                    self.tb_writer.add_histogram(f'action.{idx}/{idx_suffix}',
                                                 subactions, frame)
            self.tb_actions = []
Example 17
def aggregate_data(dataOptions, responses):
    if "mainTable" in dataOptions and dataOptions["mainTable"]:
        dataOptions["mainTable"] = aggregate(responses,
                                             dataOptions["mainTable"])
    if "unwindTables" in dataOptions:
        for unwindCol in dataOptions["unwindTables"]:
            unwoundRes = [
                get(response["value"], unwindCol) for response in responses
            ]
            unwoundRes = compact(flatten(unwoundRes))
            dataOptions["unwindTables"][unwindCol] = aggregate(
                unwoundRes, dataOptions["unwindTables"][unwindCol])
    return dataOptions
Example 18
    def feature_combinations(cls, features: List, depth=0) -> List:
        depth = depth or len(features)
        # depth = np.min( np.max(0, depth), len(features) )

        combinations = pydash.flatten([
            list(itertools.combinations(features, n))
            for n in range(0, depth + 1)
        ])
        combinations += [tuple(features)]
        combinations = pydash.uniq(combinations)

        # DEBUG: print( type(combinations), combinations )
        return combinations
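Since depth defaults to len(features), with two features the classmethod above returns the power set in order of first appearance. A worked example (the host class name is hypothetical):

combos = FeatureModel.feature_combinations(['a', 'b'])
assert combos == [(), ('a',), ('b',), ('a', 'b')]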
Example 19
def _apply_match_heuristic(page, link_contexts, to_match, entity):
  '''helper for defining heuristics for finding mentions of an entity'''
  matches = u.match_all(to_match, page['plaintext'])
  mentions = _.flatten(link_contexts.values())
  link_context = {entity: [{'text': to_match,
                            'offset': match_index,
                            'page_title': page['title']} for match_index in matches]}
  filtered_link_context = {entity: [mention for mention in link_context[entity] if not _mention_overlaps(mentions, mention)]}
  concat = lambda dest, src: _.uniq_by(dest + src, 'offset') if dest else src
  if not _.is_empty(filtered_link_context[entity]):
    return _.merge_with(link_contexts, filtered_link_context, iteratee=concat)
  else:
    return link_contexts
Example 20
def make_entity_object(entity_items):
    duckling_plugin = DucklingPlugin(
        dest="output.entities",
        dimensions=["date", "time", "duration", "number", "people"],
        timezone="Asia/Kolkata",
        debug=False,
        locale="en_IN",
    )
    return py_.flatten(
        [
            duckling_plugin._reshape(entities, i)
            for i, entities in enumerate(entity_items)
        ]
    )
Example 21
def train_model():
    nlp = spacy.load("en_core_web_sm")

    sentences = flatten([item["patterns"] for item in intents_json])
    sentences = [nlp(sentence) for sentence in sentences]
    sentences = [lemmatize_sentence(sentence) for sentence in sentences]

    bag_of_words = chain(sentences).flatten().uniq().sort().value()

    intents = [item["patterns"] for item in intents_json]

    X = [
        sentence_to_feature_vector(sentence, bag_of_words)
        for sentence in sentences
    ]

    y = []
    for idx, patterns in enumerate(intents):
        for pattern in patterns:
            entry = list(np.zeros(len(intents)))
            entry[idx] = 1
            y.append(entry)

    # remove empty sentences; delete in reverse so earlier deletions don't
    # shift the indexes of later entries
    indexes = [i for i, x in enumerate(sentences) if is_empty(x)]
    for index in reversed(indexes):
        del X[index]
        del y[index]

    model = Sequential()
    model.add(Dense(64, input_shape=(len(X[0]), ), activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(32, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(len(y[0]), activation="softmax"))

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss="categorical_crossentropy",
                  optimizer=sgd,
                  metrics=["accuracy"])

    # fitting and saving the model
    hist = model.fit(np.array(X),
                     np.array(y),
                     epochs=200,
                     batch_size=5,
                     verbose=1)

    pickle.dump(bag_of_words, open(f"{MODEL_FOLDER}/words.pkl", "wb"))
    pickle.dump(intents_json, open(f"{MODEL_FOLDER}/intents.pkl", "wb"))
    model.save(f"{MODEL_FOLDER}/chatbot.h5", hist)
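The snippet assumes a sentence_to_feature_vector helper it never defines. A plausible sketch (an assumption, not the source's definition): a binary bag-of-words encoding over the sorted vocabulary built above.

def sentence_to_feature_vector(sentence, bag_of_words):
    # sentence is a list of lemmatized tokens; bag_of_words is the vocabulary
    return [1 if word in sentence else 0 for word in bag_of_words]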
Example 22
    def list_version_urls(self):
        responses = deep_scrape(self.starting_url,
                                url_patterns=self.scraper_url_patterns)
        parsed_urls = pydash.flatten([
            parse_response_for_urls(response,
                                    regex_filters=self.scraper_url_patterns)
            for (_, response) in responses
        ])
        version_urls = {}
        for parsed_url in parsed_urls:
            for version_url_pattern in self.version_url_patterns:
                match = version_url_pattern.search(parsed_url)
                if match is not None:
                    version = match.group(1)
                    version_urls[version] = parsed_url
                    break
        return version_urls
Example 23
    def set_related_docs_from_json(self, filepath, strict, **options):
        """
        Update the document table with the documents stored in the JSON file
        specified in the `filepath` param.
        For each item, two properties are compulsory:
        - "slug"
        - "related_documents|list"
        The JSON file is produced by [google-spreadsheet-to-json](https://www.npmjs.com/package/google-spreadsheet-to-json).

        Usage:
        python manage.py documents set_related_docs_from_json --file ./docs.json
        """
        if filepath is None:
            raise Exception('filepath should be specified')
        logger.debug('set_related_docs_from_json {0}'.format(filepath))
        with open(filepath) as f:
            docs = list(filter(
                lambda x: 'slug' in x and 'related_documents|list' in x,
                flatten(json.load(f))))

        if not docs:
            raise Exception(
                'json file looks empty (or no fields with \'slug\' in x and \'related_documents|list\' in x)!'
            )
        logger.debug('found {0} docs'.format(len(docs)))

        for i, row in enumerate(docs):
            related_documents = [
                s for s in row.get('related_documents|list', '').split(',')
                if s
            ]
            slug = row.get('slug').strip()
            logger.debug('document "{0}" need to be connected to: {1}'.format(
                slug, related_documents))
            try:
                doc = Document.objects.get(slug=slug)
            except Document.DoesNotExist:
                logger.error('document {0} does not exist'.format(slug))
            else:
                related = Document.objects.filter(slug__in=related_documents)
                # since this is a symmetrical relationship, we do the cleansing
                # before bulk importing the data. This should happen in a
                # separate command.
                # doc.documents.clear()
                doc.documents.add(*related)
                doc.save()
                logger.debug('document "{0}" connected to: "{1}"'.format(
                    slug, [d.slug for d in doc.documents.all()]))
Example 24
def collate_simple_mention_ranker(batch):
    element_features = []
    target_rankings = []
    features, candidate_ids, labels = zip(*batch)
    for mention_features, mention_candidate_ids, label in zip(
            features, candidate_ids, labels):
        ranking = [label]
        features_for_ranking = []
        for candidate_features, candidate_id in zip(mention_features,
                                                    mention_candidate_ids):
            if candidate_id != label:
                ranking.append(candidate_id)
            features_for_ranking.append(candidate_features)
        target_rankings.append(ranking)
        element_features.append(features_for_ranking)
    flattened_features = _.flatten(element_features)
    return (candidate_ids, torch.tensor(flattened_features)), target_rankings
Example 25
    def get_channels(self) -> List[Dict]:
        teams_list = self.driver.api['teams'].get_teams()

        def channels_per_team(team: Dict) -> List[Dict]:
            team_id = team.get('id')
            channels_list = self.driver.api['teams'].get_public_channels(
                team_id)

            def channel_transformer(channel: Dict) -> Dict:
                return {
                    'id': channel.get('id'),
                    'name': channel.get('display_name')
                }

            return _.map_(channels_list, channel_transformer)

        return _.flatten(_.map_(teams_list, channels_per_team))
Example 26
    def log_tensorboard(self):
        '''
        Log summary and useful info to TensorBoard.
        To launch TensorBoard, run `tensorboard --logdir=data` after a session/trial is completed.
        '''
        trial_index = self.agent.spec['meta']['trial']
        session_index = self.agent.spec['meta']['session']
        if session_index != 0:  # log only session 0
            return
        idx_suffix = f'trial{trial_index}_session{session_index}'
        frame = self.env.clock.frame
        # add main graph
        if False and self.env.clock.frame == 0 and hasattr(
                self.agent.algorithm, 'net'):
            # can only log 1 net to tb now, and 8 is a good common length for stacked and rnn inputs
            net = self.agent.algorithm.net
            self.tb_writer.add_graph(net,
                                     torch.rand(ps.flatten([8, net.in_dim])))
        # add summary variables
        last_row = self.train_df.iloc[-1]
        for k, v in last_row.items():
            self.tb_writer.add_scalar(f'{k}/{idx_suffix}', v, frame)
        # add tensorboard tracker for custom variables
        for k, v in self.tb_tracker.items():
            self.tb_writer.add_scalar(f'{k}/{idx_suffix}', v, frame)
        # add network parameters
        for net_name in self.agent.algorithm.net_names:
            if net_name.startswith('global_') or net_name.startswith(
                    'target_'):
                continue
            net = getattr(self.agent.algorithm, net_name)
            for name, params in net.named_parameters():
                self.tb_writer.add_histogram(f'{net_name}.{name}/{idx_suffix}',
                                             params, frame)
        # add action histogram and flush
        if not ps.is_empty(self.tb_actions):
            actions = np.array(self.tb_actions)
            if len(actions.shape) == 1:
                self.tb_writer.add_histogram(f'action/{idx_suffix}', actions,
                                             frame)
            else:  # multi-action
                for idx, subactions in enumerate(actions.T):
                    self.tb_writer.add_histogram(f'action.{idx}/{idx_suffix}',
                                                 subactions, frame)
            self.tb_actions = []
Example 27
def webscraped():
    output = []
    output.append(depog.depog())
    output.append(artbar.artbar())
    output.append(husa.husa())
    output.append(marta.marta())
    output.append(mdb.mdb())
    output.append(hadi.hadi())
    output.append(ndb.ndb())
    output.append(polar.polar())
    output.append(teren.teren())
    output.append(bolek.bolek())
    # output.append(buran.buran())
    # output.append(zahrad.zahradbami())
    # output.append(radost.radost())
    # output.append(feste.feste())
    flat_output = flatten(output)
    return flat_output
Example 28
def map_values_deep(obj, iteratee=None, property_path=NoValue):
    """Map all non-object values in `obj` with return values from `iteratee`.
    The iteratee is invoked with two arguments: ``(obj_value, property_path)``
    where ``property_path`` contains the list of path keys corresponding to the
    path of ``obj_value``.

    Args:
        obj (list|dict): Object to map.
        iteratee (function): Iteratee applied to each value.

    Returns:
        mixed: The modified object.

    Warning:
        `obj` is modified in place.

    Example:

        >>> x = {'a': 1, 'b': {'c': 2}}
        >>> y = map_values_deep(x, lambda val: val * 2)
        >>> y == {'a': 2, 'b': {'c': 4}}
        True
        >>> z = map_values_deep(x, lambda val, props: props)
        >>> z == {'a': ['a'], 'b': {'c': ['b', 'c']}}
        True

    .. versionadded:: 2.2.0

    .. versionchanged:: 3.0.0
        Allow iteratees to accept partial arguments.

    .. versionchanged:: 4.0.0
        Renamed from ``deep_map_values`` to ``map_values_deep``.
    """
    properties = to_path(property_path)

    if pyd.is_object(obj):
        deep_iteratee = (
            lambda value, key: map_values_deep(value,
                                               iteratee,
                                               pyd.flatten([properties, key])))
        return assign(obj, map_values(obj, deep_iteratee))
    else:
        return callit(iteratee, obj, properties)
Example 30
    def _collect(self,
                 func,
                 filter=None,
                 reactions=None,
                 unique=False,
                 flatten=True):
        if reactions is None:
            reactions = self.reactions
        if filter is not None:
            reactions = [r for r in reactions if filter(r)]
        collected = []
        for r in reactions:
            c = func(r)
            collected.append(c)
        if flatten:
            collected = pydash.flatten(collected)
        if unique:
            collected = pydash.uniq(collected)
        return collected
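A hypothetical call showing how the flatten/unique knobs above compose: gather the unique ids of metabolites touched by reversible reactions (the attribute names are illustrative, not from the source).

ids = model._collect(lambda r: [m.id for m in r.metabolites],
                     filter=lambda r: r.reversible,
                     unique=True)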
Example 31
    def search(self, *criterion, **kargs):
        """Return search query object.

        Args:
            *criterion (sqlaexpr, optional): SQLA expression to filter against.

        Keyword Args:
            per_page (int, optional): Number of results to return per page.
                Defaults to ``None`` (i.e. no limit).
            page (int, optional): Which page offset of results to return.
                Defaults to ``1``.
            order_by (sqlaexpr, optional): Order by expression. Defaults to
                ``None``.

        Returns:
            Query: New :class:`Query` instance with criteria and parameters
                applied.
        """
        order_by = kargs.get('order_by')
        page = kargs.get('page')
        per_page = kargs.get('per_page')

        if order_by is None and self.model_classes:
            order_by = core.mapper_primary_key(self.model_classes[0])

        query = self

        for criteria in pyd.flatten(criterion):
            if isinstance(criteria, dict):
                query = query.filter_by(**criteria)
            else:
                query = query.filter(criteria)

        if order_by is not None:
            if not isinstance(order_by, (list, tuple)):
                order_by = [order_by]
            query = query.order_by(*order_by)

        if per_page or page:
            query = query.paginate((per_page, page))

        return query
Example 32
def deep_map_values(obj, callback=None, property_path=NoValue):
    """Map all non-object values in `obj` with return values from `callback`.
    The callback is invoked with two arguments: ``(obj_value, property_path)``
    where ``property_path`` contains the list of path keys corresponding to the
    path of ``obj_value``.

    Args:
        obj (list|dict): Object to map.
        callback (function): Callback applied to each value.

    Returns:
        mixed: The modified object.

    Warning:
        `obj` is modified in place.

    Example:

        >>> x = {'a': 1, 'b': {'c': 2}}
        >>> y = deep_map_values(x, lambda val: val * 2)
        >>> y == {'a': 2, 'b': {'c': 4}}
        True
        >>> z = deep_map_values(x, lambda val, props: props)
        >>> z == {'a': ['a'], 'b': {'c': ['b', 'c']}}
        True

    .. versionadded:: 2.2.0

    .. versionchanged:: 3.0.0
        Allow callbacks to accept partial arguments.
    """
    properties = path_keys(property_path)

    if pyd.is_object(obj):
        deep_callback = (
            lambda value, key: deep_map_values(value,
                                               callback,
                                               pyd.flatten([properties, key])))
        return pyd.extend(obj, map_values(obj, deep_callback))
    else:
        return callit(callback, obj, properties)
Example 33
def nest(collection, *properties):
    """This method is like :func:`group_by` except that it supports nested
    grouping by multiple string `properties`. If only a single key is given, it
    is like calling ``group_by(collection, prop)``.

    Args:
        collection (list|dict): Collection to iterate over.
        *properties (str): Properties to nest by.

    Returns:
        dict: Results of nested grouping by `properties`.

    Example:

        >>> results = nest([{'shape': 'square', 'color': 'red', 'qty': 5},\
                            {'shape': 'square', 'color': 'blue', 'qty': 10},\
                            {'shape': 'square', 'color': 'orange', 'qty': 5},\
                            {'shape': 'circle', 'color': 'yellow', 'qty': 5},\
                            {'shape': 'circle', 'color': 'pink', 'qty': 10},\
                            {'shape': 'oval', 'color': 'purple', 'qty': 5}],\
                           'shape', 'qty')
        >>> expected = {\
            'square': {5: [{'shape': 'square', 'color': 'red', 'qty': 5},\
                           {'shape': 'square', 'color': 'orange', 'qty': 5}],\
                       10: [{'shape': 'square', 'color': 'blue', 'qty': 10}]},\
            'circle': {5: [{'shape': 'circle', 'color': 'yellow', 'qty': 5}],\
                       10: [{'shape': 'circle', 'color': 'pink', 'qty': 10}]},\
            'oval': {5: [{'shape': 'oval', 'color': 'purple', 'qty': 5}]}}
        >>> results == expected
        True

    .. versionadded:: 4.3.0
    """
    if not properties:
        return collection

    properties = pyd.flatten(properties)
    first, rest = properties[0], properties[1:]

    return pyd.map_values(group_by(collection, first),
                          lambda value: nest(value, *rest))
Example 34
def deep_map_values(obj, callback=None, property_path=NoValue):
    """Map all non-object values in `obj` with return values from `callback`.
    The callback is invoked with two arguments: ``(obj_value, property_path)``
    where ``property_path`` contains the list of path keys corresponding to the
    path of ``obj_value``.

    Args:
        obj (list|dict): Object to map.
        callback (callable): Callback applied to each value.

    Returns:
        mixed: The modified object.

    Warning:
        `obj` is modified in place.

    Example:

        >>> x = {'a': 1, 'b': {'c': 2}}
        >>> y = deep_map_values(x, lambda val: val * 2)
        >>> y == {'a': 2, 'b': {'c': 4}}
        True
        >>> z = deep_map_values(x, lambda val, props: props)
        >>> z == {'a': ['a'], 'b': {'c': ['b', 'c']}}
        True

    .. versionadded:: 2.2.0

    .. versionchanged:: 3.0.0
        Allow callbacks to accept partial arguments.
    """
    properties = path_keys(property_path)

    if pyd.is_object(obj):
        deep_callback = (
            lambda value, key: deep_map_values(value,
                                               callback,
                                               pyd.flatten([properties, key])))
        return pyd.extend(obj, map_values(obj, deep_callback))
    else:
        return call_callback(callback, obj, properties)
Example 36
def pick(obj, *properties):
    """Creates an object composed of the picked object properties.

    Args:
        obj (list|dict): Object to pick from.
        properties (str): Property values to pick.

    Returns:
        dict: Dict containing picked properties.

    Example:

        >>> pick({'a': 1, 'b': 2, 'c': 3}, 'a', 'b') == {'a': 1, 'b': 2}
        True

    .. versionadded:: 1.0.0

    .. versionchanged:: 4.0.0
        Moved iteratee argument to :func:`pick_by`.
    """
    return pick_by(obj, pyd.flatten(properties))
Example 38
def flat_map(collection, iteratee=None):
    """Creates a flattened list of values by running each element in
    collection thru `iteratee` and flattening the mapped results. The
    `iteratee` is invoked with three arguments:
    ``(value, index|key, collection)``.

    Args:
        collection (list|dict): Collection to iterate over.
        iteratee (mixed, optional): Iteratee applied per iteration.

    Returns:
        list: Flattened mapped list.

    Example:

        >>> duplicate = lambda n: [[n, n]]
        >>> flat_map([1, 2], duplicate)
        [[1, 1], [2, 2]]

    .. versionadded:: 4.0.0
    """
    return pyd.flatten(itermap(collection, iteratee=iteratee))
Example 39
def test_flatten(case, expected):
    assert _.flatten(*case) == expected
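A plausible parametrization for the test above, assuming pytest (the cases are illustrative; pydash.flatten removes exactly one nesting level):

import pytest
import pydash as _

@pytest.mark.parametrize('case,expected', [
    (([1, [2], [3, [[4]]]],), [1, 2, 3, [[4]]]),  # only one level flattened
    (([],), []),
])
def test_flatten(case, expected):
    assert _.flatten(*case) == expected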
Example 40
    def __init__(self, func, *transforms):
        self.func = func
        self.transforms = pyd.flatten(transforms)