Example no. 1
    # Requires: import json, urllib.parse, and the pydash py_ object.
    def get(self, *args):
        url = self.request.uri
        qs = urllib.parse.urlsplit(url)
        parsed = urllib.parse.parse_qs(qs.query)

        """Placeholder for supporting inheritance.
        Inheritance is currently not implemented."""
        query = """
            SELECT w_id, w_name, p_id, p_category, p_name, p_value FROM {%1}.widget_property wp
            JOIN {%1}.widget w ON (wp.wp_widget_id = w.w_id)
            JOIN {%1}.property p ON (wp.wp_property_id = p.p_id);
        """
        # Substitute the schema name; self.schema must not be user input,
        # since it is spliced into the SQL string directly.
        query = query.replace('{%1}', self.schema)
        cur = self.db.cursor()
        cur.execute(query)

        """Check if the query parameters are valid."""
        data = c.parsedata(cur)
        categories = py_.pluck(data, 'p_category')
        valid_params = py_.intersection(parsed.keys(), categories)

        """Create a hash with widget id as key."""
        data_by_id = py_.group_by(data, 'w_id')

        """For each widget, create a new widget object to return."""
        widgets = []
        for wid, this in data_by_id.items():
            by_cat = py_.group_by(this, 'p_category')
            uniq_cat = py_.uniq(py_.pluck(this, 'p_category'))

            widget = {
                'w_id': wid,
                'w_name': py_.uniq(py_.pluck(this, 'w_name'))[0],
                'match': True
            }

            """Create a new key, val pair for each property
            for the widget."""
            for cat in uniq_cat:
                widget[cat] = py_.pluck(by_cat[cat], 'p_name')

            """Check if each property associated with the
            widget matches the query parameters."""
            for key in valid_params:
                if key in by_cat:
                    widget['match'] = (widget['match'] and len(
                        py_.intersection(widget[key], parsed[key])) > 0)

            widgets.append(widget)

        """If query parameters are not provided or invalid,
        return all widgets without filtering."""
        if len(valid_params) == 0:
            ret = widgets
        else:
            ret = py_.filter(widgets, {'match': True})

        self.write(json.dumps(ret, indent=4, separators=(',', ': ')))
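For reference, the grouping-and-filtering calls above can be exercised in isolation. A minimal sketch on hypothetical widget rows (it assumes a pydash version that still ships py_.pluck, as the handler does):

from pydash import py_

rows = [
    {'w_id': 1, 'w_name': 'dial', 'p_category': 'color', 'p_name': 'red'},
    {'w_id': 1, 'w_name': 'dial', 'p_category': 'size', 'p_name': 'small'},
    {'w_id': 2, 'w_name': 'knob', 'p_category': 'color', 'p_name': 'blue'},
]

by_id = py_.group_by(rows, 'w_id')  # {1: [two rows], 2: [one row]}
for wid, props in by_id.items():
    print(wid, py_.pluck(props, 'p_name'))

# The same intersection test drives the 'match' flag in the handler:
print(py_.intersection(['red', 'green'], ['red']))  # ['red']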
Example no. 2
    def __init__(self, items: typing.List[LokaliseKey]):
        self.group = py_.group_by(items, 'key_base_name')
        keys = list(self.group.keys())
        self.sorted_keys = py_.sort_by(
            keys,
            lambda key: self.sort_by_translations_timestamp(self.group[key]),
            reverse=True)
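The same group-then-sort idiom in isolation, with hypothetical timestamped items (sort_by_translations_timestamp is stubbed as a plain max over an assumed updated_at field):

from pydash import py_

items = [
    {'key_base_name': 'title', 'updated_at': 3},
    {'key_base_name': 'title', 'updated_at': 7},
    {'key_base_name': 'body', 'updated_at': 5},
]
group = py_.group_by(items, 'key_base_name')
# Sort the group names by the newest timestamp in each group, newest first.
sorted_keys = py_.sort_by(
    list(group.keys()),
    lambda k: max(i['updated_at'] for i in group[k]),
    reverse=True)
print(sorted_keys)  # ['title', 'body']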
Example no. 3
def rules_score_assign(array_of_scores):
    print('rules scores assign in util')
    print(array_of_scores[0], array_of_scores[1], 'values')
    # The ids are spliced straight into the SQL string, so they must be
    # trusted input: establish_conection only accepts a finished query.
    ids = ", ".join(f"'{s}'" for s in map(str, array_of_scores[:7]))
    sql_query = (
        'select array_to_json(array_agg(row_to_json(d))) from '
        '(select "Schwab"."Score", "Schwab"."RiskType" '
        f'from "public"."Schwab" WHERE "UniqueID" IN ({ids})) d')
    record = establish_conection(sql_query)
    object_list_of_results = record[0][0]
    print(object_list_of_results)
    # Separate the rows into individual risk types.
    group_results = py_.group_by(object_list_of_results, 'RiskType')
    # Assign each group to its own variable; .get avoids a KeyError when a
    # risk type is absent.
    th_object = group_results.get('TH', [])
    rt_object = group_results.get('RT', [])
    print(th_object, 'th_object')
    print(rt_object)
    # Sum the scores. The TH risk type only consists of 2 questions.
    th_score = sum(row['Score'] for row in th_object)
    rt_score = sum(row['Score'] for row in rt_object)

    print(th_score, rt_score, 'scores in data access layer')
    return th_score, rt_score
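A parameterized variant would avoid the string splicing entirely; a hypothetical sketch assuming direct access to a psycopg2 connection (which the original establish_conection helper does not expose):

import psycopg2

def fetch_scores(conn, unique_ids):
    # psycopg2 expands a Python tuple bound to IN %s into ('a', 'b', ...).
    sql = ('select "Score", "RiskType" from "public"."Schwab" '
           'WHERE "UniqueID" IN %s')
    with conn.cursor() as cur:
        cur.execute(sql, (tuple(unique_ids),))
        return cur.fetchall()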
Example no. 4
    def get_files_pars(self):
        en_fluent_files = self.project.get_fluent_files_by_dir(
            self.project.en_locale_dir_path)
        ru_fluent_files = self.project.get_fluent_files_by_dir(
            self.project.ru_locale_dir_path)

        en_fluent_relative_files = list(
            map(lambda f: self.get_relative_path_dict(f, 'en-US'),
                en_fluent_files))
        ru_fluent_relative_files = list(
            map(lambda f: self.get_relative_path_dict(f, 'ru-RU'),
                ru_fluent_files))
        # Merge both locale lists and group files sharing the same path
        # relative to their locale directory.
        relative_files = py_.flatten_depth(
            py_.concat(en_fluent_relative_files, ru_fluent_relative_files),
            depth=1)

        return py_.group_by(relative_files, 'relative_path_from_locale')
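The pairing idiom in isolation, on hypothetical path dicts (field names borrowed from the method above):

from pydash import py_

files = [
    {'locale': 'en-US', 'relative_path_from_locale': 'app/main.ftl'},
    {'locale': 'ru-RU', 'relative_path_from_locale': 'app/main.ftl'},
    {'locale': 'en-US', 'relative_path_from_locale': 'app/menu.ftl'},
]
pairs = py_.group_by(files, 'relative_path_from_locale')
for path, group in pairs.items():
    # Two entries make a full en/ru pair; a single entry means the
    # counterpart translation file is missing.
    print(path, [f['locale'] for f in group])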
Example no. 5
    def entity_consensus(self, entities: List[BaseEntity],
                         input_size: int) -> List[BaseEntity]:
        """
        Combine entities by type and value.

        This issue:
        https://github.com/Vernacular-ai/dialogy/issues/52
        describes the problem: we can return multiple identical entities,
        depending on the number of transcripts that contain the same body.

        :param entities: A list of entities which may have duplicates.
        :type entities: List[BaseEntity]
        :param input_size: Number of transcripts the entities were extracted from.
        :type input_size: int
        :return: A list of entities scored and unique by type and value.
        :rtype: List[BaseEntity]
        """
        entity_type_value_group = py_.group_by(
            entities, lambda entity: (entity.type, entity.get_value()))
        aggregate_entities = self.aggregate_entities(entity_type_value_group,
                                                     input_size)
        return self.apply_filters(aggregate_entities)
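Grouping by a computed (type, value) key can be tried on hypothetical stand-in dicts instead of BaseEntity objects:

from pydash import py_

entities = [
    {'type': 'date', 'value': '2021-05-01'},
    {'type': 'date', 'value': '2021-05-01'},
    {'type': 'number', 'value': '4'},
]
groups = py_.group_by(entities, lambda e: (e['type'], e['value']))
# Identical (type, value) pairs share a bucket, so a group's size can be
# scored against the number of transcripts.
for key, members in groups.items():
    print(key, len(members))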
Example no. 6
    def from_lokalise_keys(cls, keys: typing.List[LokaliseKey]):
        attributes_keys = [k for k in keys if k.is_attr]
        attributes = [
            FluentAstAttribute(
                id='.{name}'.format(name=k.get_key_last_name(k.key_name)),
                value=FluentSerializedMessage.get_attr(
                    k, k.get_key_last_name(k.key_name)),
                parent_key=k.get_parent_key())
            for k in attributes_keys
        ]
        attributes_group = py_.group_by(attributes, 'parent_key')

        serialized_message = ''
        for key in keys:
            if key.is_attr:
                continue
            key_name = key.get_key_last_name(key.key_name)
            key_value = key.get_translation('ru').data['translation']
            key_attributes = []

            if attributes_group:
                k = f'{key.get_key_base_name(key.key_name)}.{key_name}'
                key_attributes = attributes_group.get(k, [])

            message = key.serialize_message()
            full_message = cls.from_yaml_element(key_name, key_value,
                                                 key_attributes,
                                                 key.get_parent_key(), True)

            if full_message:
                serialized_message = serialized_message + '\n' + full_message
            elif message:
                serialized_message = serialized_message + '\n' + message
            else:
                raise Exception('Something went wrong')

        return serialized_message
Example no. 7
def generateDraft(filename, roster_file, rosterNames):
    # Each with-block closes its file automatically.
    with open(filename) as draft_file:
        data = json.load(draft_file)

    with open(rosterNames) as s_file:
        rNames = json.load(s_file)

    with open(roster_file) as r_file:
        rosters = json.load(r_file)
    mappedRosters = list(
        map(lambda x: {
            "roster_id": x["roster_id"],
            "players": x["players"]
        }, rosters))
    groupedRosters = py_.group_by(mappedRosters, "roster_id")
    # print(groupedRosters)
    mappedData = list(
        map(
            lambda x: {
                "roster_id": x["roster_id"],
                "player_id": x["player_id"],
            }, data))

    groupedData = py_.group_by(mappedData, "roster_id")
    groupedOutput = {}
    names = []
    drafted = []
    acquired = []
    percentage = []
    for key, value in groupedData.items():
        players = []
        for player in value:
            players.append(player["player_id"])
        rosterPlayers = groupedRosters[key][0]["players"]
        # print(len(rosterPlayers))
        groupedOutput[key] = players
        output = {}
        output["drafted"] = py_.intersection(players, rosterPlayers)
        output["acquired"] = py_.difference(rosterPlayers, players)
        # print(rNames["players"][str(key)]["display_name"] + ","  + str(len(output["drafted"])) + "," + str(len(output["acquired"])))
        # table = pandas.DataFrame({"names": rNames["players"][str(key)]["display_name"]})
        # print(table)
        names.append(rNames["players"][str(key)]["display_name"])
        drafted.append(len(output["drafted"]))
        acquired.append(len(output["acquired"]))
        percentage.append(round(len(output["drafted"]) / 15, 2))  # fraction of a 15-player roster
        groupedOutput[key] = output

    table = pandas.DataFrame({
        "names": names,
        "drafted": drafted,
        "acquired": acquired,
        "Percentage": percentage
    })
    output = {}
    output["draft"] = groupedOutput

    league_id = "458672130456809472"
    draft_file = "leagues/" + league_id + "/draftSorted.json"
Example no. 8
from pydash import py_

data = [
    {
        'id': 3,
        'name': 'zcx3'
    },
    {
        'id': 3,
        'name': 'zcx3'
    },
    {
        'id': 4,
        'name': 'zcx4'
    },
]

print(py_.group_by(data, 'id'))


class Cat:
    def __init__(self, name='zcx', gender='male', age=18):
        self.name = name
        self.gender = gender
        self.age = age

    def __repr__(self):
        return '{}-{}-{}'.format(self.name, self.gender, self.age)


cat1 = Cat()
cat2 = Cat('jack', 'male', 20)
cat3 = Cat('jack', 'male', 24)
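The snippet stops before the Cat instances are used. Since pydash's path getter also reads object attributes, a plausible continuation (an assumption, not part of the original source) is:

cats = [cat1, cat2, cat3]
# group_by reads the .name attribute off each object, like a dict key
print(py_.group_by(cats, 'name'))
# {'zcx': [zcx-male-18], 'jack': [jack-male-20, jack-male-24]}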