Example #1
	def __on_pairing_event(self, action, pairing):
		key = (pairing.user_unique_id, pairing.player_id)
		if action == "added":
			for notifier in _.filter_(self.__notifiers, lambda n: n.user_unique_id == pairing.user_unique_id):
				notifier.set_player_ids(database.get_user_paired_player_ids(pairing.user_unique_id))
		elif action == "removed":
			for notifier in _.filter_(self.__notifiers, lambda n: n.user_unique_id == pairing.user_unique_id):
				notifier.set_player_ids(database.get_user_paired_player_ids(pairing.user_unique_id))
Example #2
def add_relationship_mutations(field_type, names_only=False):
    mutations = ''
    mutation_names = []

    def f1(field):
        # keep only the fields that carry a @relation directive
        return any(d.name.value == 'relation' for d in field.ast_node.directives)

    relationship_fields = filter_(field_type.fields, f1)

    for field in relationship_fields:
        relation_directive = (
            filter_(field.ast_node.directives, lambda d: d.name.value == 'relation')[0])
        rel_type = filter_(relation_directive.arguments, lambda a: a.name.value == 'name')[0]
        rel_direction = filter_(relation_directive.arguments, lambda a: a.name.value == 'direction')[0]

        if rel_direction.value.value in ['out', 'OUT']:
            from_type = field_type
            to_type = inner_type(field.type)
        else:
            from_type = inner_type(field.type)
            to_type = field_type
        from_pk = primary_key(from_type)
        to_pk = primary_key(to_type)

        # FIXME: could add relationship properties here
        mutations += (f'Add{from_type.name}{to_type.name}'
                      f'({low_first_letter(from_type.name + from_pk.ast_node.name.value)}: {inner_type(from_pk.type).name}!, '
                      f'{low_first_letter(to_type.name + to_pk.ast_node.name.value)}: {inner_type(to_pk.type).name}!): '
                      f'{from_type.name} @MutationMeta(relationship: "{rel_type.value.value}", from: "{from_type.name}", to: "{to_type.name}")')
        mutation_names.append(f'Add{from_type.name}{to_type.name}')
    if names_only:
        return mutation_names
    else:
        return mutations
Example #3
    def all(cls, *args):
        try:
            dem_args = list(args)
            if some(dem_args, lambda result: isinstance(result, Error)):
                return head(
                    filter_(dem_args,
                            lambda result: isinstance(result, Error)))

            if some(dem_args, lambda result: not isinstance(result, Ok)):
                return Error(
                    Exception('Some items passed in were not a Result.'))

            return Ok(map_(dem_args, lambda result: result.getValue()))
        except Exception as e:
            return Error(e)
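The Result.all combinator above leans on pydash's some, every, filter_ and head helpers. A minimal, self-contained sketch of the same pattern, using hypothetical Ok/Error stand-ins instead of the project's real Result classes:

import pydash as _

class Ok:
    def __init__(self, value):
        self.value = value

class Error:
    def __init__(self, exc):
        self.exc = exc

results = [Ok(1), Error(ValueError("boom")), Ok(3)]

# some(): is there at least one Error in the list?
if _.some(results, lambda r: isinstance(r, Error)):
    # filter_() + head(): return the first Error found
    first_error = _.head(_.filter_(results, lambda r: isinstance(r, Error)))
    print(type(first_error).__name__)  # Error

# every(): are all items Ok? (False here because of the Error)
print(_.every(results, lambda r: isinstance(r, Ok)))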
Example #4
def check_all():
    '''Check all spec files, all specs.'''
    spec_files = ps.filter_(os.listdir(SPEC_DIR), lambda f: f.endswith('.json') and not f.startswith('_'))
    for spec_file in spec_files:
        spec_dict = util.read(f'{SPEC_DIR}/{spec_file}')
        for spec_name, spec in spec_dict.items():
            try:
                spec['name'] = spec_name
                spec['git_SHA'] = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
                check(spec)
            except Exception as e:
                logger.exception(f'spec_file {spec_file} fails spec check')
                raise e
    logger.info(f'Checked all specs from: {ps.join(spec_files, ",")}')
    return True
Example #5
    def for_each_facility(self, data, combination, previous_cycle_data=None):
        result = NOT_REPORTING

        values = values_for_records(self.fields, data.c_records)
        number_of_consumption_record_blanks = len(
            pydash.filter_(values, lambda v: v is None))
        if data.c_count == 0 and data.a_count == 0 and data.p_count == 0:
            return result
        if data.c_count < 25 or data.a_count < 22 or data.p_count < 7:
            result = NO
        elif number_of_consumption_record_blanks > 2:
            result = NO
        else:
            result = YES
        return result
Example #6
def find_registry_tags(registry_url, repo_name, src_tag, cred):
    url = "https://{}/api/repositories/{}/tags".format(registry_url, repo_name)
    payload = ""
    headers = {
        'Accept': "application/json",
        'Content-Type': "application/json",
        'Authorization': "Basic YWRtaW46SGFyYm9yMTIzNDU="
    }
    if cred:
        response = get(url,
                       data=payload,
                       headers=headers,
                       auth=HTTPBasicAuth(cred['username'], cred['password']))
    else:
        response = get(url, data=payload, headers=headers)
    # if response.status_code != 200:
    #     raise requests.ConnectTimeout
    response = response.json()
    replicated_tags = []
    digest = _.filter_(response, {"name": src_tag})
    if not digest:
        return [src_tag]
    replicated_tags = _.filter_(response, {"digest": digest[0]['digest']})
    return _.map_(replicated_tags, 'name')
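Besides a callable, pydash.filter_ also accepts a dict of key/value pairs to match, and pydash.map_ accepts a property name to pluck, which is what the digest lookup above relies on. A small sketch with made-up registry tags:

import pydash as _

tags = [
    {"name": "v1.0", "digest": "sha256:aaa"},
    {"name": "latest", "digest": "sha256:aaa"},
    {"name": "v2.0", "digest": "sha256:bbb"},
]

# Dict shorthand: keep only the items whose fields match
same_digest = _.filter_(tags, {"digest": "sha256:aaa"})

# String shorthand: pluck one property from each match
print(_.map_(same_digest, "name"))  # ['v1.0', 'latest']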
Example #7
def cypher_query(context,
                 resolve_info,
                 first=-1,
                 offset=0,
                 _id=None,
                 **kwargs):
    types_ident = type_identifiers(resolve_info.return_type)
    type_name = types_ident.get('type_name')
    variable_name = types_ident.get('variable_name')
    schema_type = resolve_info.schema.get_type(type_name)

    filtered_field_nodes = filter_(
        resolve_info.field_nodes,
        lambda n: n.name.value == resolve_info.field_name)

    # FIXME: how to handle multiple field_node matches
    selections = extract_selections(
        filtered_field_nodes[0].selection_set.selections,
        resolve_info.fragments)

    # if len(selections) == 0:
    #     # FIXME: why aren't the selections found in the filteredFieldNode?
    #     selections = extract_selections(resolve_info.operation.selection_set.selections, resolve_info.fragments)

    # FIXME: support IN for multiple values -> WHERE
    arg_string = re.sub(r"\"([^(\")]+)\":", "\\1:", json.dumps(kwargs))

    id_where_predicate = f'WHERE ID({variable_name})={_id} ' if _id is not None else ''
    outer_skip_limit = f'SKIP {offset}{" LIMIT " + str(first) if first > -1 else ""}'

    cyp_dir = cypher_directive(resolve_info.schema.query_type,
                               resolve_info.field_name)
    if cyp_dir:
        custom_cypher = cyp_dir.get('statement')
        query = (
            f'WITH apoc.cypher.runFirstColumn("{custom_cypher}", {arg_string}, true) AS x '
            f'UNWIND x AS {variable_name} RETURN {variable_name} '
            f'{{{build_cypher_selection("", selections, variable_name, schema_type, resolve_info)}}} '
            f'AS {variable_name} {outer_skip_limit}')
    else:
        # No @cypher directive on QueryType
        query = f'MATCH ({variable_name}:{type_name} {arg_string}) {id_where_predicate}'
        query += (
            f'RETURN {variable_name} '
            f'{{{build_cypher_selection("", selections, variable_name, schema_type, resolve_info)}}}'
            f' AS {variable_name} {outer_skip_limit}')

    return query
Example #8
def export_settings(conn_id: int,
                    payload: s.ExportIn,
                    db: Session = Depends(get_db)):
    conn = db.query(models.Connection).get(conn_id)
    result = {}
    if "expressions" in payload.options:
        result["expressions"] = [
            e.dict() for e in redact.get_expressions(connection=conn)
        ]
    if "policies" in payload.options:
        policies = redact.get_policies(connection=conn)
        columns = redact.get_columns(connection=conn)
        result["policies"] = []
        for p in policies:
            policy = p.dict()
            policy["columns"] = [
                c.dict() for c in pydash.filter_(
                    columns,
                    lambda x: x.object_owner == p.object_owner
                    and x.object_name == p.object_name,
                )
            ]
            result["policies"].append(policy)
    if "categories" in payload.options:
        result["categories"] = [
            CategoryOut.from_orm(c).dict()
            for c in db.query(models.Category).filter(
                models.Connection.id == conn_id).all()
        ]
    if "plans" in payload.options:
        result["plans"] = [
            PlanOut.from_orm(p).dict() for p in db.query(models.Plan).filter(
                models.Connection.id == conn_id).all()
        ]
    if "rules" in payload.options:
        result["rules"] = [
            RuleOut.from_orm(r).dict() for r in db.query(models.Rule).filter(
                models.Connection.id == conn_id).all()
        ]

    with tempfile.NamedTemporaryFile(prefix=conn.name,
                                     suffix="_export.json",
                                     mode="w",
                                     delete=False) as handler:
        handler.write(json.dumps(jsonable_encoder(result), indent=4))
        name = handler.name

    return FileResponse(name)
Example #9
def check_all():
    '''Check all spec files, all specs.'''
    spec_files = ps.filter_(os.listdir(SPEC_DIR), lambda f: f.endswith('.json') and not f.startswith('_'))
    for spec_file in spec_files:
        spec_dict = util.read(f'{SPEC_DIR}/{spec_file}')
        for spec_name, spec in spec_dict.items():
            # fill-in info at runtime
            spec['name'] = spec_name
            spec = extend_meta_spec(spec)
            try:
                check(spec)
            except Exception as e:
                logger.exception(f'spec_file {spec_file} fails spec check')
                raise e
    logger.info(f'Checked all specs from: {ps.join(spec_files, ",")}')
    return True
Example #10
    async def download_maps(self):
        try:
            map_list = await self._api_config.get_maps()
            def cb(m):
                return pydash.get(m, 'site')
            map_list = pydash.filter_(map_list, cb)
            if len(map_list) == 0:
                print('there are no maps on site configuration')
                return False


        except Exception as err:
            print('failed to download maps')
            return False

        return True
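pydash.get returns None for a missing path, so the cb predicate above simply drops maps that have no site entry. A sketch with invented map records:

import pydash

map_list = [
    {"name": "m1", "site": "https://example.org/m1"},
    {"name": "m2"},  # no 'site' key, so it is filtered out
]

with_site = pydash.filter_(map_list, lambda m: pydash.get(m, "site"))
print([m["name"] for m in with_site])  # ['m1']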
Example #11
    async def test_command(self, ctx: Context):
        """
        Count how many messages you have in this channel.
        """

        placeholder = await self.bot.say('Fetching messages..')
        messages = [m async for m in self.bot.logs_from(ctx.message.channel)]
        my_messages = pydash.filter_(
            messages,
            lambda m: m.author == ctx.message.author
        )

        return await self.bot.edit_message(
            placeholder,
            f'You have {len(my_messages)} messages.'
        )
Example #12
    async def turn_on_electrodes(self, payload, params):
        # Get the three object from device-model
        scicad = MicropedeAsync(APPNAME, port=self.port, loop=self.loop)
        three_object = await scicad.get_state('device-model', 'three-object')
        active_electrodes = payload

        def active_filter(obj):
            return _.includes(active_electrodes, obj["id"])

        active_objects = _.filter_(three_object, active_filter)
        channels = _.map_(_.map_(active_objects, "channel"), int)

        max_channels = self.control_board.number_of_channels
        channel_states = np.zeros(max_channels, dtype=int)
        channel_states[channels] = 1
        self.control_board.set_state_of_channels(channel_states)
        print(self.control_board.state_of_channels)
        await self.update_board_info()
Example #13
def check_all():
    '''Check all spec files, all specs.'''
    spec_files = ps.filter_(
        os.listdir(SPEC_DIR),
        lambda f: f.endswith('.json') and not f.startswith('_'))
    for spec_file in spec_files:
        spec_dict = util.read(f'{SPEC_DIR}/{spec_file}')
        for spec_name, spec in spec_dict.items():
            try:
                spec['name'] = spec_name
                spec['git_SHA'] = subprocess.check_output(
                    ['git', 'rev-parse', 'HEAD']).decode().strip()
                check(spec)
            except Exception as e:
                logger.exception(f'spec_file {spec_file} fails spec check')
                raise e
    logger.info(f'Checked all specs from: {ps.join(spec_files, ",")}')
    return True
Example #14
def find_adjacent_cells(origin: Coord,
                        cells: List[Coord],
                        only_orthogonal: bool = False) -> List[Coord]:
    """
    Finds the cells adjacent to origin among cells.
    :param only_orthogonal: if True, only orthogonal neighbours count
    :param origin: the reference cell
    :param cells: candidate cells to test
    :return: the cells from cells that are adjacent to origin
    """
    chosen_nearness = orthogonal if only_orthogonal else adjacent_square
    diffs = py_.map_(cells, lambda other: other - origin)
    adjacent = py_.filter_(
        py_.zip_(cells, diffs),
        lambda pair: pair[1] in chosen_nearness,
    )

    return py_.map_(adjacent, 0)
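pydash treats an integer iteratee as an index getter, so py_.map_(adjacent, 0) plucks the first element of each (cell, diff) pair produced by zip_. A toy version of that zip/filter/pluck pipeline, with plain tuples standing in for the project's Coord type:

import pydash as py_

cells = [(0, 1), (2, 2), (1, 0)]
diffs = [(-1, 0), (1, 1), (0, -1)]
allowed = {(-1, 0), (0, -1)}  # stand-in for the chosen_nearness set

pairs = py_.zip_(cells, diffs)                    # [[(0, 1), (-1, 0)], ...]
adjacent = py_.filter_(pairs, lambda p: p[1] in allowed)
print(py_.map_(adjacent, 0))                      # [(0, 1), (1, 0)]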
Example #15
    def filter_list(self, by: str):
        """Filter a dict or list
        
        Args:
            by (str): Required. What to filter by.
        
        Raises:
            TypeError: If state is not a list
        
        Returns:
            Chepy: The Chepy object.

        Examples:
            >>> Chepy('[{"a": 1}, {"b": 2}, {"a": 1, "b": 3}]').str_list_to_list().filter_list("b").o
            [{"b": 2}, {"a": 1, "b": 3}]
        """
        assert isinstance(self.state, list), StateNotList()
        self.state = pydash.filter_(self.state, by)
        return self
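A string predicate makes pydash.filter_ keep the items where that key (or attribute) is present and truthy, which is how filter_list("b") behaves in the docstring example. A standalone sketch:

import pydash

records = [{"a": 1}, {"b": 2}, {"a": 1, "b": 3}]
print(pydash.filter_(records, "b"))  # [{'b': 2}, {'a': 1, 'b': 3}]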
Example #16
    def get_yield_strength_to_composed_data():
        conn = db.get_connection()
        cur = conn.cursor()
        main_query_select = 'SELECT main_info_id, sortament, sigma_t FROM rloveshhenko$mydbtest.mechanical_properties WHERE sigma_t != " " and main_info_id in (SELECT id FROM mydbtest.main_info WHERE classification like "%Сталь%");'

        ids_query_select = 'SELECT distinct main_info_id FROM rloveshhenko$mydbtest.mechanical_properties WHERE sigma_t != " " and main_info_id in (SELECT id FROM mydbtest.main_info WHERE classification like "%Сталь%");'

        cur.execute(main_query_select)
        data = cur.fetchall()
        cur.execute(ids_query_select)
        ids = cur.fetchall()

        sigmas = map_(
            ids, lambda item: {
                'id': item['main_info_id'],
                'sigmas': map_(
                    filter_(data, lambda it: it['main_info_id'] == item['main_info_id']),
                    lambda x: get(x, 'sigma_t'))
            })

        new_sigmas = map_(
            sigmas, lambda item: {
                'id': item['id'],
                'sigma': format(
                    reduce_(
                        item['sigmas'],
                        lambda total, x: float(total) + float(x) / len(item['sigmas']),
                        0),
                    '.2f')
            })

        for item in new_sigmas:
            cur.execute(
                "UPDATE rloveshhenko$mydbtest.composed_data SET sigma_t = %s WHERE id = %s",
                (item['sigma'], item['id']))
            conn.commit()

        cur.close()
        return True
Example #17
def chooseSong(song_data):
    """
    chooses a song to tweet and updates recently choosen songs
    """
    recent = song_data['recentSongIds']
    songs = song_data['songs']

    # filter out recently choosen songs and randomly choose a song
    filtered_song_ids = pydash.filter_(songs.keys(), lambda x: x not in recent)
    song_id = pydash.sample(filtered_song_ids)

    # get chosen song and increment play count
    song = songs[song_id]
    song['playCount'] += 1

    # pop least recently choosen song and push new one
    if len(recent) == 7:
        pydash.shift(recent)
    pydash.push(recent, song_id)

    return song
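pydash mirrors several JavaScript-style array helpers, so shift and push above mutate recent in place while sample picks one random element. A small sketch of the same rotation, assuming the pydash version in use ships those helpers, with a dummy song table:

import pydash

recent = ["s1", "s2"]
songs = {"s1": {}, "s2": {}, "s3": {}, "s4": {}}

# Drop recently played ids, then pick one of the rest at random
candidates = pydash.filter_(list(songs.keys()), lambda s: s not in recent)
choice = pydash.sample(candidates)

# Keep the recent list bounded: pop the oldest id, append the new one
if len(recent) == 2:
    pydash.shift(recent)   # removes and returns the first element
pydash.push(recent, choice)
print(recent)  # e.g. ['s2', 's3']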
Example #18
    def get_containers(self, name=None, database=None):
        docker = self.app.docker
        if name:
            if not len(re.findall(r'^anydb_', name)):
                if database and not len(
                        re.findall(r'^anydb_' + database + '_', name)):
                    name = 'anydb_' + database + '_' + name
                else:
                    name = 'anydb_' + name
        else:
            if database:
                name = 'anydb_' + database + '_'
            else:
                name = 'anydb_'

        def filter_(value):
            if len(re.findall(r'^' + name, value.name)):
                return True
            return False

        return _.filter_(docker.containers.list(all=True), filter_)
Example #19
    def get_markets(self, main_market_filter=None):
        """
        Gets all the Bittrex markets and filters them based on the main market filter

        :param main_market_filter: Main market to filter on (ex: BTC, ETH, USDT)
        :type main_market_filter: str

        :return: All Bittrex markets (with filter applied, if any)
        :rtype: list
        """
        markets = self.Bittrex.get_markets()
        if not markets["success"]:
            logger.error("Failed to fetch Bittrex markets")
            exit()

        markets = markets["result"]
        if main_market_filter is not None:
            market_check = main_market_filter + "-"
            markets = py_.filter_(markets, lambda market: market_check in market["MarketName"])
        markets = py_.map_(markets, lambda market: market["MarketName"])
        return markets
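The trailing filter_/map_ pair is a common pydash pipeline; the same thing can be written with pydash's chaining, assuming the chain proxy exposes the module-level filter_/map_ names (fabricated market data below):

import pydash as py_

markets = [
    {"MarketName": "BTC-LTC"},
    {"MarketName": "ETH-LTC"},
    {"MarketName": "BTC-DOGE"},
]

names = (py_.chain(markets)
         .filter_(lambda m: "BTC-" in m["MarketName"])
         .map_("MarketName")
         .value())
print(names)  # ['BTC-LTC', 'BTC-DOGE']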
Example #20
    def filter_by(self, predicate: Any = None):
        """Filter a dict or list
        
        Args:
            predicate (Any, optional): What to filter by. Defaults to None.
        
        Raises:
            TypeError: If state is not a list or dict
        
        Returns:
            Chepy: The Chepy object.

        Examples:
            >>> Chepy('[{"a": 1}, {"b": 2}, {"a": 1, "b": 3}]').str_list_to_list().filter_by("b").o
            [{"b": 2}, {"a": 1, "b": 3}]
        """
        if isinstance(self.state, (list, dict)):
            self.state = pydash.filter_(self.state, predicate)
            return self
        else:  # pragma: no cover
            raise TypeError("State is not a list")
Example #21
    def get_markets(self, main_market_filter=None):
        """
        Gets all the Bittrex markets and filters them based on the main market filter

        :param main_market_filter: Main market to filter on (ex: BTC, ETH, USDT)
        :type main_market_filter: str

        :return: All Bittrex markets (with filter applied, if any)
        :rtype: list
        """
        markets = self.Bittrex.get_markets()
        if not markets["success"]:
            logger.error("Failed to fetch Bittrex markets")
            exit()

        markets = markets["result"]
        if main_market_filter is not None:
            market_check = main_market_filter + "-"
            markets = py_.filter_(
                markets, lambda market: market_check in market["MarketName"])
        markets = py_.map_(markets, lambda market: market["MarketName"])
        return markets
Example #22
def get_markets(main_market_filter=None):
    """
    Gets all the Bittrex markets and filters them based on the main market filter

    :param main_market_filter: Main market to filter on (ex: BTC, ETH, USDT)
    :type main_market_filter: str

    :return: All Bittrex markets (with filter applied, if any)
    :rtype: list
    """

    markets = Bittrex.get_markets()
    if not markets['success']:
        logger.error('Failed to fetch Bittrex markets')
        exit()

    markets = markets['result']
    if main_market_filter is not None:
        market_check = main_market_filter + '-'
        markets = py_.filter_(markets, lambda market: market_check in market['MarketName'])
    markets = py_.map_(markets, lambda market: market['MarketName'])
    return markets
Example #23
    def get_countries_data_by_currency(self, currency_name, continent_name=False):

        # Parameters validation.
        if (not currency_name):
            raise NameError(
                'You must provide a currency name in order to use the get_countries_data_by_currency() method.')

        # Setting up conditions.
        conditions = {'Currency Name': currency_name.strip()}

        # Checking if a continent name has been provided.
        if continent_name:

            # Sanitizing input fields.
            continent_name = self._sanitize_continent_name(continent_name)

            # Trimming and assigning value to the conditions object.
            conditions['Continent Name'] = continent_name.strip()

        # Filtering GWOD and return.
        countries = pydash.filter_(self._GWOD, conditions)
        countries = pydash.sort_by(countries, ['Country Name'])

        return countries
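When filter_ receives a dict with several keys, an item must match all of them, which is how the conditions object narrows by currency and, optionally, continent. A sketch with an invented GWOD-like dataset:

import pydash

gwod = [
    {"Country Name": "Spain", "Currency Name": "Euro", "Continent Name": "Europe"},
    {"Country Name": "France", "Currency Name": "Euro", "Continent Name": "Europe"},
    {"Country Name": "Japan", "Currency Name": "Yen", "Continent Name": "Asia"},
]

conditions = {"Currency Name": "Euro", "Continent Name": "Europe"}
countries = pydash.filter_(gwod, conditions)
print(pydash.sort_by(countries, "Country Name"))
# France first, then Spain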
Example #24
def convert_csv(path):
    ap = []
    result = ""

    with open(path) as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read(4096))
        dialect.doublequote = True
        csvfile.seek(0)
        reader = csv.reader(csvfile, dialect)
        header = False

        for row in reader:
            if not header:
                header = row
            else:
                item = {}
                for i in range(0, len(row)):
                    item[header[i]] = row[i]
                ap.append(item)

    domains = pydash.without(pydash.uniq(pydash.map_(ap, 'EA-Domain')), '', None)
    codelists = pydash.filter_(ap, {'EA-Type': 'ENUMERATION'})
    domains = list(set(domains) - set(pydash.map_(codelists.copy(), 'EA-Name')))
    domains.sort()
    final_domains = []
    final_datypes = []
    classes = pydash.filter_(ap, {'EA-Type': 'CLASS'}) + pydash.filter_(ap, {'EA-Type': 'DATATYPE'})
    datatypes = pydash.map_(pydash.filter_(ap, {'EA-Type': 'DATATYPE'}), 'EA-Name')
    classes_only = pydash.map_(pydash.filter_(ap, {'EA-Type': 'CLASS'}), 'EA-Name')
    attributes = pydash.filter_(ap, {'EA-Type': 'attribute'}) + pydash.filter_(ap, {'EA-Type': 'connector'})
    attributes = pydash.sort_by(attributes, 'EA-Domain')
    # for enumeration in codelists:
    #    attributes = pydash.remove(attributes, {'EA-Domain': enumeration})

    title = os.path.splitext(os.path.basename(path))[0]
    package = pydash.find(ap, {'EA-Type': 'Package'})

    if len(domains) > 0:
        for domain in domains:

            klassen = pydash.filter_(classes, {'EA-Name': domain})
            if 0 < len(klassen) <= 1:
                klasse = pydash.find(classes, {'EA-Name': domain})
                if klasse['EA-Type'] == 'DATATYPE':
                    result += "\n[%s]\n" % domain
                    final_datypes.append(domain)
                else:
                    result += "\n[%s]\n" % domain
                    final_domains.append(domain)

                if klasse is not None:
                    result += 'ap-definition-nl=%s\n' % klasse['ap-definition-nl']
                    result += 'ap-usagenote-nl=%s\n' % klasse['ap-usageNote-nl']
                    result += 'namespace=%s\n' % klasse['namespace']
                    result += 'localname=%s\n' % klasse['localname']


                domain_attributes = pydash.filter_(attributes, {'EA-Domain': domain})
                domain_attribute_names = pydash.without(pydash.uniq(pydash.map_(domain_attributes, 'EA-Name')), '', None) #localname

                result += 'attributes=%s\n' % ','.join(domain_attribute_names)

                for attr_name in domain_attribute_names:
                    result += "\n[%s:%s]\n" % (domain, attr_name)
                    attr = pydash.find(domain_attributes, {'EA-Name': attr_name})
                    if attr['range'] == "http://www.w3.org/2004/02/skos/core#Concept":
                        ap_codelist = pydash.find(codelists, {'EA-Name': attr['EA-Range']})
                        if ap_codelist is not None:
                            attr['ap-codelist'] = ap_codelist['ap-codelist']
                    for key in attr:
                        result += '%s=%s\n' % (key, attr[key].replace('&', '&amp;'))

            elif len(klassen) > 1:
                for klasse in klassen:
                    if klasse['ap-label-nl'] == "":
                        klasse['ap-label-nl'] = domain
                    if klasse['EA-Type'] == 'DATATYPE':
                        result += "\n[%s]\n" % klasse['ap-label-nl']
                        final_datypes.append(klasse['ap-label-nl'])
                    else:
                        result += "\n[%s]\n" % klasse['ap-label-nl']
                        final_domains.append(klasse['ap-label-nl'])
                    if klasse is not None:
                        result += 'ap-definition-nl=%s\n' % klasse['ap-definition-nl']
                        result += 'ap-usagenote-nl=%s\n' % klasse['ap-usageNote-nl']
                        result += 'namespace=%s\n' % klasse['namespace']
                        result += 'localname=%s\n' % klasse['localname']

                    domain_attributes = pydash.filter_(attributes,
                                                       {'EA-Domain-GUID': klasse['EA-GUID']})
                    domain_attribute_names = pydash.without(pydash.uniq(
                        pydash.map_(domain_attributes, 'localname')), '', None)

                    result += 'attributes=%s\n' % ','.join(
                        domain_attribute_names)

                    for attr_name in domain_attribute_names:
                        result += "\n[%s:%s]\n" % (klasse['ap-label-nl'], attr_name)
                        attr = pydash.find(domain_attributes,
                                           {'localname': attr_name})
                        if attr['range'] == "http://www.w3.org/2004/02/skos/core#Concept":
                            ap_codelist = pydash.find(
                                codelists, {'EA-Name': attr['EA-Range']})
                            if ap_codelist is not None:
                                attr['ap-codelist'] = ap_codelist['ap-codelist']
                        for key in attr:
                            result += '%s=%s\n' % (key, attr[key])

        result += "\n[overview]\n"
        final_domains = list(set(final_domains))
        final_domains.sort()
        result += 'entities=%s\n' % ','.join(final_domains)
        result += 'dtypes=%s\n' % ','.join(final_datypes)
        if package is not None:
            result += 'package=%s\n' % package['EA-Name'].replace('OSLO-', '')
        result += 'title=%s\n' % title

    return [result, package['EA-Name'].replace('OSLO-', '')]
Example #25
def cypher_mutation(context,
                    resolve_info,
                    first=-1,
                    offset=0,
                    _id=None,
                    **kwargs):
    # FIXME: lots of duplication here with cypherQuery, extract into util module
    types_ident = type_identifiers(resolve_info.return_type)
    type_name = types_ident.get('type_name')
    variable_name = types_ident.get('variable_name')
    schema_type = resolve_info.schema.get_type(type_name)

    filtered_field_nodes = filter_(
        resolve_info.field_nodes,
        lambda n: n.name.value == resolve_info.field_name)

    # FIXME: how to handle multiple field_node matches
    selections = extract_selections(
        filtered_field_nodes[0].selection_set.selections,
        resolve_info.fragments)

    # FIXME: support IN for multiple values -> WHERE
    arg_string = re.sub(r"\"([^(\")]+)\":", "\\1:", json.dumps(kwargs))

    id_where_predicate = f'WHERE ID({variable_name})={_id} ' if _id is not None else ''
    outer_skip_limit = f'SKIP {offset}{" LIMIT " + str(first) if first > -1 else ""}'

    cyp_dir = cypher_directive(resolve_info.schema.mutation_type,
                               resolve_info.field_name)
    if cyp_dir:
        custom_cypher = cyp_dir.get('statement')
        query = (
            f'CALL apoc.cypher.doIt("{custom_cypher}", {arg_string}) YIELD value '
            f'WITH apoc.map.values(value, [keys(value)[0]])[0] AS {variable_name} '
            f'RETURN {variable_name} {{{build_cypher_selection("", selections, variable_name, schema_type, resolve_info)}}} '
            f'AS {variable_name} {outer_skip_limit}')
    # No @cypher directive on MutationType
    elif resolve_info.field_name.startswith(
            'create') or resolve_info.field_name.startswith('Create'):
        # Create node
        # TODO: handle for create relationship
        # TODO: update / delete
        # TODO: augment schema
        query = (
            f'CREATE ({variable_name}:{type_name}) SET {variable_name} = $params RETURN {variable_name} '
            f'{{{build_cypher_selection("", selections, variable_name, schema_type, resolve_info)}}} '
            f'AS {variable_name}')
    elif resolve_info.field_name.startswith(
            'add') or resolve_info.field_name.startswith('Add'):
        mutation_meta = mutation_meta_directive(
            resolve_info.schema.mutation_type, resolve_info.field_name)
        relation_name = mutation_meta.get('relationship')
        from_type = mutation_meta.get('from')
        from_var = low_first_letter(from_type)
        to_type = mutation_meta.get('to')
        to_var = low_first_letter(to_type)
        from_param = resolve_info.schema.mutation_type.fields[
            resolve_info.field_name].ast_node.arguments[0].name.value[len(from_var):]
        to_param = resolve_info.schema.mutation_type.fields[
            resolve_info.field_name].ast_node.arguments[1].name.value[len(to_var):]
        query = (
            f'MATCH ({from_var}:{from_type} {{{from_param}: ${from_param}}}) '
            f'MATCH ({to_var}:{to_type} {{{to_param}: ${to_param}}}) '
            f'CREATE ({from_var})-[:{relation_name}]->({to_var}) '
            f'RETURN {from_var} '
            f'{{{build_cypher_selection("", selections, variable_name, schema_type, resolve_info)}}} '
            f'AS {from_var}')
    else:
        raise Exception('Mutation does not follow naming conventions')
    return query
Example #26
# -*- coding: utf-8 -*-

from copy import deepcopy

import pydash as _

from .fixtures import parametrize


pydash_methods = _.filter_(dir(_), lambda m: callable(getattr(_, m, None)))


def test_chaining_methods():
    chain = _.chain([])

    for method in pydash_methods:

        chained = getattr(chain, method)
        assert chained.method is getattr(_, method)


@parametrize('value,methods', [
    ([1, 2, 3, 4], [('without', (2, 3)),
                    ('reject', (lambda x: x > 1,))])
])
def test_chaining(value, methods):
    expected = deepcopy(value)
    actual = _.chain(deepcopy(value))
Example #27
    def multi_file_handler(self, msg):
        '''Worker-process handler that writes a single uploaded file's chunks.
        '''
        try:
            # When loading sqlalchemy inside the upload worker process, limit each process's DB connection pool size
            os.environ['SQLALCHEMY_POOL_SIZE'] = '1'
            from run import app as inner_app
            from app import aios_redis

            with inner_app.app_context():
                self.aios_redis = aios_redis

                file_key = msg.get('file_key')
                dir_path = msg.get('dir_path')
                file_name = msg.get('file_name')
                curr_chunk = msg.get('curr_chunk')
                total_chunks = msg.get('total_chunks')
                tenant_id = msg.get('tenant_id')
                user_id = msg.get('user_id')
                cache_expired_time = msg.get('cache_expired_time')

                wait_lock = f'plus_uploader:lock:{file_key}'
                while not self.aios_redis.setnx(wait_lock,
                                                f'lock.{curr_chunk}'):
                    # self.aios_print(file_key, curr_chunk, 'task waiting...')
                    time.sleep(0.001)
                else:
                    self.lock = f'{wait_lock} by {curr_chunk}'
                    self.aios_print(file_key, curr_chunk,
                                    f'current lock: lock.{curr_chunk}')
                    # The lock is now held exclusively
                    # wait_lock TTL = number of existing but unmerged chunks * estimated expiry time per chunk
                    parts = len(
                        _.filter_(
                            os.listdir(dir_path),
                            lambda x: '-' not in x and '.deleted' not in x))
                    count = max(parts, 1)
                    self.aios_print(f'文件存放目录{dir_path}, 实时已存在的分片数: {parts}')
                    self.aios_redis.expire(wait_lock,
                                           cache_expired_time * count)
                    # self.aios_print(f'修正的lock超时时间, {cache_expired_time * count}')
                    # Update the task status
                    args = {
                        'task_id': file_key,
                        'tenant_id': tenant_id,
                        'created_by': user_id,
                        'updated_by': user_id,
                        'batch': Utils.get_batch(dir_path),
                        'status': TASK_STATUS_NONE,
                        'chunks': '',
                        'size': 0,
                        'link': {
                            'host': Utils.get_host(dir_path)
                        }
                    }
                    taskdao = TaskDAO()
                    # task_json = taskdao.get_task_from_cache(tenant_id, file_key)
                    task_json = taskdao.get_task(tenant_id,
                                                 file_key,
                                                 json=True)
                    if task_json is None:
                        task_json = taskdao.add(args)

                    if task_json['status'] != TASK_STATUS_MERGED:
                        task_json['status'] = TASK_STATUS_MERGING
                        # taskdao.update(task_json)
                        TaskModel.bulk_update_mappings([task_json])

                        # Merge the chunks
                        # Full path of the merged file
                        merged_file = os.path.join(dir_path, file_name)

                        merge_process = self.partation_merge(
                            dir_path, file_key, total_chunks)
                        merge_process = _.map_(
                            merge_process,
                            lambda x: x.replace(f'{file_key}.', ''))
                        except_complete_name = f'1-{total_chunks}' if total_chunks > 1 else '1'
                        # Check completeness, e.g. ['1-701'] vs except_complete_name '1-701'
                        self.aios_print('检测是否完整', merge_process,
                                        'except_complete_name',
                                        except_complete_name)
                        if except_complete_name in merge_process:
                            for f in os.listdir(dir_path):
                                self.aios_print('比较文件名', f,
                                                except_complete_name)
                                if f.startswith(f'{file_key}.') and (
                                        not f.endswith(except_complete_name)):
                                    Utils.try_remove(os.path.join(dir_path, f))
                                    self.aios_print('删除残留异常文件', f)
                            # Rename the merged file
                            final_merged_file = os.path.join(
                                dir_path, f'{file_key}.{except_complete_name}')
                            shutil.move(final_merged_file, merged_file)
                            self.aios_print('文件改名', final_merged_file, '>>',
                                            merged_file)
                            # Record the task status
                            task_json['chunks'] = ','.join(
                                [str(i + 1) for i in range(total_chunks)])
                            task_json['status'] = TASK_STATUS_MERGED
                            task_json['size'] = os.path.getsize(merged_file)

                            task_json['link'] = {
                                'host': Utils.get_host(dir_path)
                            }

                            # Merge finished
                            self.aios_print(f'{curr_chunk}.合并完成')
                        else:
                            # ['1', '3-5', '10']
                            covert_process = []
                            for section in merge_process:
                                if '-' in section:
                                    [left, right] = section.split('-')
                                    covert_process.extend([
                                        str(i)
                                        for i in range(int(left),
                                                       int(right) + 1)
                                    ])
                                else:
                                    covert_process.append(section)

                            task_json['chunks'] = ','.join(covert_process)
                            # Not complete yet
                            self.aios_print(f'{curr_chunk}.不完整,继续等待')
                        # Persist to cache
                        # taskdao.update(task_json)
                        self.aios_print(f'更新状态, {task_json}')
                        TaskModel.bulk_update_mappings([task_json])
                    else:
                        # Already merged; no need to merge again, just exit
                        self.aios_print(f'{curr_chunk}.已经合并完成后, 不需要再次合并')
                    # Release the lock
                    self.aios_redis.delete(wait_lock)
                    self.aios_print(f'结束.{curr_chunk}/{total_chunks}')

                    # Clean up chunks marked for deletion
                    for file in os.listdir(dir_path):
                        if file.endswith('.deleted'):
                            self.aios_print('清理被标记的可删除分片', file)
                            Utils.try_remove(os.path.join(dir_path, file))

                return {'file_key': file_key, 'curr_chunk': curr_chunk}, None
        except Exception as err:
            import traceback
            traceback.print_exc()
            print('multi_file_handler', err)
            return None, {
                'file_key': msg['file_key'],
                'curr_chunk': msg['curr_chunk']
            }
Example #28
def convert_contributor_csv(path, voc):
    items = []
    result = ""

    with open(path) as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read(4096))
        csvfile.seek(0)
        reader = csv.reader(csvfile, dialect)
        header = False

        for row in reader:
            if not header:
                header = row
            else:
                item = {}
                for i in range(0, len(row)):
                    item[header[i]] = row[i]
                items.append(item)

    contributors = pydash.filter_(items, {voc: 'C'})
    editors = pydash.filter_(items, {voc: 'E'})
    authors = pydash.filter_(items, {voc: 'A'})

    contributor_emails = pydash.map_(contributors.copy(), 'E-mail')
    editor_emails = pydash.map_(editors.copy(), 'E-mail')
    author_emails = pydash.map_(authors.copy(), 'E-mail')

    if len(items) > 0:
        result += "\n[overview_contributors]\n"
        result += 'voc=%s\n' % voc.lower()
        result += 'issued=%s\n' % time.strftime("%Y-%m-%d")
        if len(contributor_emails) > 0:
            result += 'contributors=%s\n' % ','.join(contributor_emails)
        if len(editor_emails) > 0:
            result += 'editors=%s\n' % ','.join(editor_emails)
        if len(author_emails) > 0:
            result += 'authors=%s\n' % ','.join(author_emails)

        for contributor in contributor_emails:
            result += "\n[contributor:%s]\n" % contributor
            bijdrager = pydash.find(contributors, {'E-mail': contributor})

            if bijdrager is not None:
                result += 'naam=%s, %s\n' % (bijdrager['Naam'],
                                             bijdrager['Voornaam'])
                result += 'email=%s\n' % bijdrager['E-mail']
                result += 'organisatie=%s\n' % bijdrager['Affiliatie']
                result += 'website=%s\n' % bijdrager['Website']

        for author in author_emails:
            result += "\n[author:%s]\n" % author
            bijdrager = pydash.find(authors, {'E-mail': author})

            if bijdrager is not None:
                result += 'naam=%s, %s\n' % (bijdrager['Naam'],
                                             bijdrager['Voornaam'])
                result += 'email=%s\n' % bijdrager['E-mail']
                result += 'organisatie=%s\n' % bijdrager['Affiliatie']
                result += 'website=%s\n' % bijdrager['Website']

        for editor in editor_emails:
            result += "\n[editor:%s]\n" % editor
            bijdrager = pydash.find(editors, {'E-mail': editor})

            if bijdrager is not None:
                result += 'naam=%s, %s\n' % (bijdrager['Naam'],
                                             bijdrager['Voornaam'])
                result += 'email=%s\n' % bijdrager['E-mail']
                result += 'organisatie=%s\n' % bijdrager['Affiliatie']
                result += 'website=%s\n' % bijdrager['Website']

    return result
Example #29
def test_dash_suffixed_method_aliases():
    methods = _.filter_(pydash_methods, lambda m: m.endswith('_'))
    assert methods

    for method in methods:
        assert getattr(_._, method[:-1]) is getattr(_, method)
Example #30
def first_id_field(field_type):
    fields = filter_(list(field_type.fields.keys()), lambda t: inner_type(field_type.fields[t].type).name == 'ID')
    if len(fields) > 0:
        return field_type.fields[fields[0]]
    else:
        return None
Example #31
def get_combination(combinations, name):
    return pydash.filter_(combinations, lambda x: x[NAME] == name)[0]
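Indexing a filter_ result with [0] raises IndexError when nothing matches; pydash.find returns the first match (or None) in a single pass. A sketch with hypothetical combination data:

import pydash

combinations = [{"name": "a", "value": 1}, {"name": "b", "value": 2}]

via_filter = pydash.filter_(combinations, lambda x: x["name"] == "b")[0]
via_find = pydash.find(combinations, lambda x: x["name"] == "b")
assert via_filter == via_find  # both pick the first combination named "b"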
Example #32
def first_non_null_field(field_type):
    fields = filter_(list(field_type.fields.keys()), lambda t: type(field_type.fields[t]).__name__ == 'GraphQLNonNull')
    if len(fields) > 0:
        return field_type.fields[fields[0]]
    else:
        return None
# print("Note : Default is  [1] : Get All Repositories\n")

choice = None
isPrivate = None

while True:
    try:
        choice = int(input("Enter Your Choice : "))
        isPrivate = repo_select(choice)

        if isPrivate == "Invalid Selection":
            continue

    except ValueError:
        print("Invalid Selection! Try again.")
        print(isPrivate)
        continue

    else:
        break

loop_request(init_api_url)

if isPrivate != "All":
    print(json.dumps(_.filter_(repo_data, {"private": isPrivate}), indent=4))
    write_data_to_file('outdata.json',
                       (_.filter_(repo_data, {"private": isPrivate})))
else:
    print(json.dumps(repo_data, indent=4))
    write_data_to_file('outdata.json', repo_data)
Example #34
def test_dash_suffixed_method_aliases():
    methods = _.filter_(pydash_methods, lambda m: m.endswith('_'))
    assert methods

    for method in methods:
        assert getattr(_._, method[:-1]) is getattr(_, method)
Example #35
# -*- coding: utf-8 -*-

from copy import deepcopy

import pydash as _

from .fixtures import parametrize

pydash_methods = _.filter_(dir(_), lambda m: callable(getattr(_, m, None)))


def test_chaining_methods():
    chain = _.chain([])

    for method in pydash_methods:

        chained = getattr(chain, method)
        assert chained.method is getattr(_, method)


@parametrize('value,methods',
             [([1, 2, 3, 4], [('without', (2, 3)),
                              ('reject', (lambda x: x > 1, ))])])
def test_chaining(value, methods):
    expected = deepcopy(value)
    actual = _.chain(deepcopy(value))

    for method, args in methods:
        expected = getattr(_, method)(expected, *args)
        actual = getattr(actual, method)(*args)

    assert actual.value() == expected
Example #36
def test_filter_(case, expected):
    assert _.filter_(*case) == expected
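The parametrize decorator for this test is not shown in the snippet; a self-contained equivalent using pytest's own parametrize, with a couple of illustrative cases (one callable predicate, one dict shorthand), might look like:

import pydash as _
import pytest


@pytest.mark.parametrize('case,expected', [
    (([1, 2, 3, 4], lambda x: x > 2), [3, 4]),
    (([{'a': 1}, {'a': 0}], {'a': 1}), [{'a': 1}]),
])
def test_filter_(case, expected):
    assert _.filter_(*case) == expected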