Example #1
    def collect_replies(self, channel, messages, oldest=0):
        """
        Collect the replies for a list of messages.
        """

        replies = [
            self.list_replies(channel=channel, ts=message["ts"], oldest=oldest)
            for message in messages
        ]
        return py_.flatten(replies)
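
py_.flatten used above flattens exactly one level of nesting, which is what the list of per-message reply lists needs. A standalone sketch (not from the source):

from pydash import py_

# flatten removes a single level of nesting
assert py_.flatten([[1, 2], [3], [4, 5]]) == [1, 2, 3, 4, 5]
# deeper nesting survives; py_.flatten_deep flattens recursively
assert py_.flatten([1, [2, [3]]]) == [1, 2, [3]]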
Example #2
    def resources(self) -> List[Resource]:
        """
        Parse the list of resources from the Excel data.
        """

        assignments = [[
            name.strip() for name in it.split(",")
        ] for it in self.xlsx_data["Assigned to"].dropna().tolist()]

        resources = [
            Resource(name=name) for name in py_.uniq(py_.flatten(assignments))
        ]

        # We also add a ghost for unassigned items
        resources.append(Resource(name="Ghost"))
        return resources
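
The pipeline above splits each "Assigned to" cell on commas, flattens the per-cell name lists, and dedupes them. A minimal sketch, with a hypothetical cells value standing in for the dropna'd Excel column:

from pydash import py_

cells = ["alice, bob", "bob", "carol"]
assignments = [[name.strip() for name in cell.split(",")] for cell in cells]
# flatten, then keep the first occurrence of each name
assert py_.uniq(py_.flatten(assignments)) == ["alice", "bob", "carol"]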
Example #3
def normalize_gantt_pro_projects(
        *projects: ganttpro.Project
) -> Tuple[List[Resource], List[Task]]:
    """
    Convert ganttpro projects to lists of valid resources and tasks for tj.
    """

    all_resources = py_.flatten(
        [[normalize_resource(res) for res in project.resources]
         for project in projects])
    resources = py_.uniq_by(all_resources, lambda res: res.name)

    tasks = []

    for project in projects:
        prefix = normalize_name(project.name)
        for task in project.tasks:
            tasks.append(namespace_task(normalize_task(task), prefix))

    return resources, tasks
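
py_.uniq_by dedupes by the key the callback returns, keeping the first element seen for each key; here it collapses resources that share a name across projects. A standalone sketch:

from pydash import py_

rows = [{"name": "alice"}, {"name": "alice"}, {"name": "bob"}]
assert py_.uniq_by(rows, lambda r: r["name"]) == [{"name": "alice"}, {"name": "bob"}]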
Example #4
    def update_targets(self, items):
        target = self.targets[0]
        xas_averaged = target.collection
        valids, invalids = py_.partition(
            mark_lu(py_.flatten(items), target.lu_field, self.dt_fetch),
            'valid')
        # Remove documents flagging now-valid data as invalid.
        xas_averaged.delete_many(
            mark_invalid({"mp_id": {
                "$in": py_.pluck(valids, 'mp_id')
            }}))

        for doc in valids:
            xas_averaged.update_one(py_.pick(doc, 'mp_id', 'element'),
                                    {'$set': doc},
                                    upsert=True)
        for doc in invalids:
            xas_averaged.update_one(mark_invalid(py_.pick(doc, 'mp_id')),
                                    {'$set': doc},
                                    upsert=True)
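
py_.partition with a string predicate uses pydash's property shorthand: documents with a truthy doc['valid'] go into the first list, the rest into the second. A sketch, assuming pydash 3.x (py_.pluck was removed in pydash 4, where py_.map(valids, 'mp_id') is the equivalent):

from pydash import py_

docs = [{"mp_id": "mp-1", "valid": True}, {"mp_id": "mp-2", "valid": False}]
valids, invalids = py_.partition(docs, 'valid')
assert py_.pluck(valids, 'mp_id') == ["mp-1"]
# pick projects a document down to the listed keys
assert py_.pick(docs[0], 'mp_id') == {"mp_id": "mp-1"}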
Example #5
    def update_targets(self, items):
        xas_averaged = self.targets[0]
        xas_averaged.ensure_index([("valid", 1), ("mp_id", 1)])
        xas_averaged.ensure_index([("mp_id", 1), ("element", 1)])
        xas_averaged.ensure_index([("chemsys", 1), ("element", 1)])
        valids, invalids = py_.partition(
            mark_lu(py_.flatten(items), xas_averaged.lu_field, self.dt_fetch),
            'valid')
        # Remove documents flagging now-valid data as invalid.
        xas_averaged.collection.delete_many(
            mark_invalid({"mp_id": {
                "$in": py_.pluck(valids, 'mp_id')
            }}))
        bulk = xas_averaged.collection.initialize_ordered_bulk_op()
        for doc in valids:
            (bulk.find(py_.pick(doc, 'mp_id',
                                'element')).upsert().replace_one(doc))
        for doc in invalids:
            (bulk.find(mark_invalid(py_.pick(
                doc, 'mp_id'))).upsert().replace_one(doc))
        bulk.execute()
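
initialize_ordered_bulk_op is the old PyMongo 2.x bulk API (removed in PyMongo 4). A rough equivalent of the two upsert loops with the newer bulk_write interface, as a sketch assuming PyMongo 3+:

from pymongo import ReplaceOne

ops = [ReplaceOne(py_.pick(doc, 'mp_id', 'element'), doc, upsert=True)
       for doc in valids]
ops += [ReplaceOne(mark_invalid(py_.pick(doc, 'mp_id')), doc, upsert=True)
        for doc in invalids]
xas_averaged.collection.bulk_write(ops, ordered=True)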
Example #6
def collect_script_operator(script: Union[Dict, List],
                            operator: str) -> List[Dict]:
    """
    Walk down the script and collect usage of operator.
    """

    nodes = []

    if isinstance(script, dict):
        if script.get("op") == operator:
            nodes.append(script)

        skip_keys = ["op", "var"]
        for key, value in script.items():
            if key not in skip_keys:
                nodes.extend(collect_script_operator(value, operator))

    elif isinstance(script, list):
        nodes.extend(
            py_.flatten(
                [collect_script_operator(node, operator) for node in script]))

    return nodes
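
For example, on a small made-up script tree, both "less-than" nodes are collected, whether they sit under a dict key or inside a list:

script = {
    "op": "if",
    "cond": {"op": "less-than", "lhs": {"var": "x"}, "rhs": 10},
    "then": [{"op": "less-than", "lhs": 1, "rhs": 2}],
}
assert len(collect_script_operator(script, "less-than")) == 2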
Example #7
def unprocessed_mpids(sources, targets):
    xas = sources[0]
    xas_averaged = targets[0]
    mpids_marked_invalid = set(invalid_pks(xas_averaged, 'mp_id'))
    mpids_source_updated = set(
        updated_pks(xas, targets, 'mp_id', dt_map=lambda dt: dt.isoformat()))
    mpids_build_incomplete = set()
    for mp_id in tqdm(xas.collection.distinct('mp_id')):
        doc = xas.collection.find_one({'mp_id': mp_id}, ['structure'])
        structure = Structure.from_dict(doc['structure'])
        elements = set(
            py_.map(
                py_.flatten(site.species_and_occu.elements
                            for site in structure.sites), str))
        n_processed = xas_averaged.collection.find({
            'mp_id': mp_id,
            'element': {
                '$in': list(elements)
            }
        }).count()
        if n_processed != len(elements):
            mpids_build_incomplete.add(mp_id)
    return mpids_source_updated | (mpids_build_incomplete -
                                   mpids_marked_invalid)
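
py_.map is pydash's map_ (the py_ proxy exposes names that clash with builtins without the trailing underscore); with str as the callback it stringifies every flattened element, mirroring the set(py_.map(py_.flatten(...), str)) call above. A standalone sketch:

from pydash import py_

assert set(py_.map(py_.flatten([[1, 2], [2, 3]]), str)) == {'1', '2', '3'}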
Example #8
def test6():
    def get_line_id(alist, code):
        return _.find(alist, lambda x: 'func_pos_code' in x['properties'] and x['properties']['func_pos_code'] == code)
        # return _.matches_property('properties.func_pos_code', code)(alist)
    def get_point_id(alist, code):
        # Point features carry 'function_pos_code' (the original checked one
        # key but read another).
        return _.find(alist, lambda x: 'function_pos_code' in x['properties'] and x['properties']['function_pos_code'] == code)
    ret = []
    linesmap = {}
    with codecs.open(ur'd:\linesmap.json', 'r', 'utf-8-sig') as f:
        linesmap = json.loads(f.read())
    polyline_dn = mongo_find('kmgd', 'network', {'properties.webgis_type':'polyline_dn'})
    # towers = mongo_find('kmgd', 'features', {'properties.webgis_type':'point_tower'})
    idx = 0
    for k in linesmap.keys():
        codes = _.uniq(_.flatten(linesmap[k]))
        o = get_line_id(polyline_dn, k)
        if o:
            # l = mongo_find('kmgd', 'features', {'properties.line_func_code':k})
            # ids = _.pluck(l, '_id')
            ll = mongo_find('kmgd', 'features', {'properties.function_pos_code':{'$in':codes}})
            if ll:
                lll = _.pluck(ll, '_id')
                o['properties']['nodes'] = lll
                # o = add_mongo_id(o)
                ret.append(o)
                idx += 1
                # if idx > 10:
                #     break
    mongo_action('kmgd', 'network', 'save', ret)
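
Here _ is the pydash shorthand (e.g. import pydash as _); _.find returns the first element matching the predicate, or None. A minimal sketch with hypothetical features:

import pydash as _

features = [{'properties': {'func_pos_code': 'L1'}},
            {'properties': {'func_pos_code': 'L2'}}]
assert _.find(features, lambda x: x['properties'].get('func_pos_code') == 'L2') is features[1]
assert _.find(features, lambda x: x['properties'].get('func_pos_code') == 'L9') is None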
Example #9
    def update_search_vector(self):
        """
    Fill the search_vector using self.data:
    e.g. get data['title'] if is a basestring or data['title']['en_US'] according to the values contained into settings.LANGUAGES
    Note that a language configuration can be done as well, in this case consider the last value in settings.LANGUAGES (e.g. 'english')
    Then fill search_vector with authors and tags.
    """
        from django.db import connection
        from django.conf import settings
        from pydash import py_

        fields = (('title', 'A'), ('abstract', 'B'))
        contents = []

        _metadata = self.data.get('data')

        for _field, _weight in fields:
            default_value = _metadata.get(_field, None)
            value = u"\n".join(
                filter(None, [
                    default_value
                    if isinstance(default_value, basestring) else None
                ] + list(
                    set(
                        py_.get(_metadata, '%s.%s' % (_field, lang[2]), None)
                        for lang in settings.LANGUAGES))))
            if value:
                contents.append((value, _weight, 'simple'))

        authors = u", ".join([
            u'%s - %s' % (t.get('fullname', ''), t.get('affiliation', ''))
            for t in self.data.get('authors', [])
        ])

        # well, quite complex.
        tags = u", ".join(
            set(
                filter(
                    None,
                    py_.flatten([[
                        py_.get(tag.get('data'), 'name.%s' % lang[2], None)
                        for lang in settings.LANGUAGES
                    ] + [tag.get('slug'), tag.get('name')]
                                 for tag in self.data.get('tags', [])]))))

        #
        if authors:
            contents.append((authors, 'A', 'simple'))
        if tags:
            contents.append((tags, 'C', 'simple'))

        # contents.append((u"\n".join(BeautifulSoup(markdown(u"\n\n".join(filter(None,[
        #     self.contents,
        #   ])), extensions=['footnotes'])).findAll(text=True)), 'B', 'simple'))

        q = ' || '.join([
            "setweight(to_tsvector('simple', COALESCE(%%s,'')), '%s')" % weight
            for value, weight, _config in contents
        ])

        # print contents

        with connection.cursor() as cursor:
            cursor.execute(
                ''.join([
                    """
        UPDATE ober_story SET search_vector = x.weighted_tsv, search_text=x.text FROM (  
          SELECT id,""", q, """
                AS weighted_tsv,
            %s as text
            FROM ober_story
          WHERE ober_story.id=%s
        ) AS x
        WHERE x.id = ober_story.id
      """
                ]), [value for value, _w, _c in contents] +
                ['\n\n'.join([value for value, weight, _config in contents])] +
                [self.id])

        logger.debug('story {pk:%s, title:%s} search_vector updated.' %
                     (self.pk, self.title))

        return contents
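
The deep lookups above rely on py_.get, which walks a dotted path and returns a default when any key along the way is missing. A standalone sketch:

from pydash import py_

data = {'title': {'en_US': 'A title', 'it_IT': 'Un titolo'}}
assert py_.get(data, 'title.en_US') == 'A title'
# a missing path returns the default instead of raising KeyError
assert py_.get(data, 'title.de_DE', None) is None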