def get(self):
    req = request.args.to_dict()

    if not has(req, 'query'):
        return FactoryInvalid.responseInvalid(
            {'msg': 'Query params are needed'}, 422)

    if not has(req, 'query.roles._id'):
        return FactoryInvalid.responseInvalid(
            {'msg': 'Must have owner id'}, 422)

    pagination = defaults(req, {
        'limit': os.environ.get("MAESTRO_SCAN_QTD", 200),
        'page': 1
    })

    limit = int(pagination['limit'])
    page = int(pagination['page'])
    skip = (page - 1) * limit

    query = json.loads(req['query'])
    args = FilterAPI() \
        .addBatchFilters(query) \
        .make()

    count = self.entity().count(args)

    return {
        'found': count,  # reuse the count computed above instead of querying twice
        'total_pages': ceil(count / limit),
        'page': page,
        'limit': limit,
        'items': self.entity().getAll(args, limit, skip)
    }
def check_ids(self):
    reply, name, year = "", None, None
    imdb_movie, tmdb_movie_info = None, None
    matched_imdb, matched_tmdb = False, False
    tmdb_year = None  # initialized so the summary check below cannot NameError

    if has(self.mediainfo, 'general.0.movie_name'):
        movie_name = re.search(r'^(.+)\((\d{4})\)',
                               self.mediainfo['general'][0]['movie_name'])
        if movie_name:
            name = movie_name.group(1).strip()
            year = movie_name.group(2).strip()

    if has(self.mediainfo, 'general.0.imdb'):
        imdb_id = ''.join(
            re.findall(r'[\d]+', self.mediainfo['general'][0]['imdb']))
        try:
            imdb_movie = ia.get_movie(imdb_id)
        except imdb._exceptions.IMDbParserError:
            reply += self._print_report(
                "error", "Invalid IMDB id: `" +
                self.mediainfo['general'][0]['imdb'] + "`\n")
        else:
            if name == imdb_movie['title'] and year == str(imdb_movie['year']):
                reply += self._print_report(
                    "correct", "Matched IMDB name and year\n")
                matched_imdb = True

    if has(self.mediainfo, 'general.0.tmdb'):
        tmdb_id = ''.join(
            re.findall(r'[\d]+', self.mediainfo['general'][0]['tmdb']))
        tmdb_movie = tmdb.Movies(tmdb_id)
        try:
            tmdb_movie_info = tmdb_movie.info()
        except requests.exceptions.HTTPError:
            reply += self._print_report(
                "error", "Invalid TMDB id: `" +
                self.mediainfo['general'][0]['tmdb'] + "`\n")
        else:
            datetime_obj = datetime.datetime.strptime(
                tmdb_movie_info['release_date'], '%Y-%m-%d')
            tmdb_year = str(datetime_obj.year)
            if (name == tmdb_movie_info['original_title']
                    and year == tmdb_year):
                reply += self._print_report(
                    "correct", "Matched TMDB name and year\n")
                matched_tmdb = True

    if not matched_imdb and not matched_tmdb:
        if imdb_movie and has_many(imdb_movie, None, ['title', 'year']):
            reply += self._print_report(
                "error", "IMDB: Name: `" + imdb_movie['title'] +
                "` Year: `" + str(imdb_movie['year']) + "`\n")
        if (tmdb_movie_info and 'original_title' in tmdb_movie_info
                and tmdb_year):
            reply += self._print_report(
                "error", "TMDB: Name: `" +
                tmdb_movie_info['original_title'] + "` Year: `" +
                tmdb_year + "`\n")
    return reply
def groupby(
    self,
    keys: Union[List[str], str],
    criteria: Optional[Dict] = None,
    properties: Union[Dict, List, None] = None,
    sort: Optional[Dict[str, Union[Sort, int]]] = None,
    skip: int = 0,
    limit: int = 0,
) -> Iterator[Tuple[Dict, List[Dict]]]:
    """
    Simple grouping function that will group documents by keys.
    Will only work if the keys are included in the files collection
    for GridFS.

    Args:
        keys: fields to group documents
        criteria: PyMongo filter for documents to search in
        properties: properties to return in grouped documents
        sort: Dictionary of sort order for fields. Keys are field names
            and values are 1 for ascending or -1 for descending.
        skip: number of documents to skip
        limit: limit on total number of documents returned

    Returns:
        generator returning tuples of (dict, list of docs)
    """
    criteria = (
        self.transform_criteria(criteria)
        if isinstance(criteria, dict)
        else criteria
    )
    keys = [keys] if not isinstance(keys, list) else keys
    keys = [
        f"metadata.{k}"
        if k not in files_collection_fields and not k.startswith("metadata.")
        else k
        for k in keys
    ]

    for group, ids in self._files_store.groupby(
        keys, criteria=criteria, properties=[f"metadata.{self.key}"]
    ):
        ids = [
            get(doc, f"metadata.{self.key}")
            for doc in ids
            if has(doc, f"metadata.{self.key}")
        ]
        group = {
            k.replace("metadata.", ""): get(group, k)
            for k in keys
            if has(group, k)
        }
        yield group, list(self.query(criteria={self.key: {"$in": ids}}))
def _process_file_cfg(cls):
    try:
        from ruamel.yaml import YAML
    except ImportError:
        LOGGER.debug("YAML Parsing not Available")
        return

    yaml = YAML(typ="unsafe", pure=True)
    yaml_path = os.path.join(cls.BASE_DIR, cls.BASE_CFG_FILE)
    yaml_pathlib = pathlib.Path(yaml_path)
    # the path must both exist and be a regular file (the original `or`
    # would let a directory slip through to yaml.load)
    if not (yaml_pathlib.exists() and yaml_pathlib.is_file()):
        LOGGER.debug("YAML Config File not Available")
        return

    config_dict = yaml.load(yaml_pathlib)  # type: dict
    config_dict = config_dict if isinstance(config_dict, dict) else dict()
    config_dict_cls = cls.as_dict()
    white_list = ("ROOT_DIR", "BASE_DIR", "BASE_CFG_FILE")

    if find_spec("pydash"):
        import pydash
        for path, value in six.iteritems(config_dict):
            upper_key = str.upper(path)
            if upper_key not in white_list and pydash.has(
                    config_dict_cls, upper_key):
                setattr(cls, upper_key, value)
    else:
        for key, value in six.iteritems(config_dict):
            upper_key = str.upper(key)
            if upper_key in config_dict_cls and upper_key not in white_list:
                setattr(cls, upper_key, value)

    LOGGER.info("YAML Config %s was Loaded" % yaml_path)
def __init__(self,
             data: dict,
             kind: str,
             options: dict = None,
             colorscheme: str = None,
             zoom: bool = True):
    super().__init__()
    self.data = data
    self.kind = kind
    self.options = options if options else {}
    self.colorscheme = colorscheme
    self.zoom = zoom

    # Check user input
    self._validate_input()

    # Set default style and options
    self._set_default_options()
    if not colorscheme and not has(options, 'plugins.colorschemes.scheme'):
        self._set_default_style()

    # Set synced arguments
    self._options = self.options
    self._data = self.data
    self._type = self.kind
    self._colorscheme = self.colorscheme
    self._zoom = self.zoom
def check_movie_name(self):
    reply = ""
    if has(self.mediainfo, 'general.0.movie_name'):
        # tv show name in format "Name - S01E01"
        if re.search(r'^.+\s-\sS\d{2}E\d{2}',
                     self.mediainfo['general'][0]['movie_name']):
            reply += self._print_report(
                "correct", "TV show name format `Name - S01E01`: " +
                self.mediainfo['general'][0]['movie_name'] + "\n")
        # movie name in format "Name (Year)"
        elif re.search(r'^.+\(\d{4}\)',
                       self.mediainfo['general'][0]['movie_name']):
            reply += self._print_report(
                "correct", "Movie name format `Name (Year)`: " +
                self.mediainfo['general'][0]['movie_name'] + "\n")
        else:
            reply += self._print_report(
                "error", "Movie name does not match format `Name (Year)`: " +
                self.mediainfo['general'][0]['movie_name'] + "\n")
    else:
        reply += self._print_report("error", "Missing movie name\n")
    return reply
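# A minimal sketch (illustrative values, not from the source) of the
# mediainfo structure the check_* helpers above and below assume,
# addressed via pydash deep paths such as 'general.0.movie_name'.
example_mediainfo = {
    'general': [{'movie_name': 'Arrival (2016)',
                 'imdb': 'tt2543164',
                 'tmdb': '329865'}],
    'video': [{'language': 'English'}],
    'audio': [{'language': 'French'}],
    'text': [{'language': 'English', 'default': 'yes'}],
}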
def get_details(self) -> pd.DataFrame:
    """
    Get basket details

    :return: dataframe containing current basket properties

    **Usage**

    Get basket's current state

    **Examples**

    Get basket details:

    >>> from gs_quant.markets.baskets import Basket
    >>>
    >>> basket = Basket.get("GSMBXXXX")
    >>> basket.get_details()
    """
    props = list(CustomBasketsPricingParameters.properties().union(
        PublishParameters.properties(),
        CustomBasketsCreateInputs.properties()))
    props = sorted(props)
    details = [{'name': k, 'value': get(self, k)} for k in props
               if has(self, k)]
    return pd.DataFrame(details)
def check_text_default_flag(self):
    # english subs for foreign films should be default=yes
    reply = ""
    if len(self.mediainfo['text']) > 0:
        first_audio_language, has_english_subs, english_subs_index = False, False, False
        if has(self.mediainfo, 'audio.0.language'):
            first_audio_language = self.mediainfo['audio'][0][
                'language'].lower()
        if first_audio_language != 'english':
            for i, item in enumerate(self.mediainfo['text']):
                if 'language' in item:
                    if item['language'].lower() == 'english':
                        has_english_subs, english_subs_index = True, i
            if has_english_subs:
                # foreign audio and has english subs.
                # english subs should be default=yes
                if self.mediainfo['text'][english_subs_index][
                        'default'].lower() == 'yes':
                    reply += self._print_report(
                        "correct",
                        "Foreign film with English subs `default=yes`\n")
                else:
                    reply += self._print_report(
                        "error",
                        "English subs on foreign film should be `default=yes`\n")
    return reply
def post(self):
    req = request.get_json(force=True)

    pagination = defaults(req, {
        'limit': os.environ.get("MAESTRO_SCAN_QTD", 200),
        'page': 1
    })

    limit = int(pagination['limit'])
    page = int(pagination['page'])
    skip = (page - 1) * limit

    query = {}
    if has(req, 'query'):
        query = json.loads(req['query'])

    args = FilterAPI() \
        .addBatchFilters(query) \
        .make()

    count = self.entity().count(args)

    return {
        'found': count,
        # match get() above: ceil already yields the page count, so the
        # original `+ 1` overcounted by one page
        'total_pages': ceil(count / limit),
        'page': page,
        'limit': limit,
        'items': self.entity().getAll(args, limit, skip)
    }
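# A worked example of the pagination arithmetic shared by get() and
# post() above (illustrative numbers): 450 matching documents paged
# 200 at a time.
from math import ceil

count, limit, page = 450, 200, 3
skip = (page - 1) * limit          # 400: skip the first two pages
total_pages = ceil(count / limit)  # 3: pages 1-2 hold 200 docs, page 3 holds 50
assert (skip, total_pages) == (400, 3)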
def _it_doc_process(doc):
    # merge the parsed URI fields into the document
    doc = {**doc, **track_util.url_split(doc.get('uri'))}
    if doc.get('senderToUserId') and _.get(doc, 'params.sender'):
        doc['salesId'] = str(_.get(doc, 'params.sender'))
        doc['params']['sender'] = str(doc['senderToUserId'])
    if _.has(doc, 'params.secondlevel'):
        doc['_parent'] = _.get(doc, 'params.secondlevel')
    return doc
async def get_matched_paths(path: str, db: AsyncIOMotorCollection):
    matches = []
    async for ctx in db.find({}):
        if pydash.has(ctx, 'path'):
            match = re.match(ctx['path'], path)
            if match:
                matches.append(pydash.merge(
                    ctx, {'regex_groups': match.groups()}))
    return matches
def has_many(obj, base, keys):
    for key in keys:
        lookup = ''
        if base:
            lookup += base + '.'
        lookup += key
        if not has(obj, lookup):
            return False
    return True
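# Usage sketch for has_many, assuming `has` is pydash.has: every key is
# resolved as a deep path under `base` (or on its own when base is falsy).
doc = {'a': {'b': 1, 'c': 2}}
assert has_many(doc, 'a', ['b', 'c']) is True
assert has_many(doc, 'a', ['b', 'd']) is False
assert has_many(doc, None, ['a.b']) is True  # no base: keys are full paths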
def _inner(self, *args, **kwargs):
    if has(self, '_Basket__error_messages') \
            and self._Basket__error_messages is not None:
        if len(self._Basket__error_messages) < 1:
            self._Basket__finish_initialization()
        # error_msgs and fn are captured from the enclosing decorator
        for error_msg in error_msgs:
            if error_msg in self._Basket__error_messages:
                raise MqError(error_msg.value)
    return fn(self, *args, **kwargs)
def is_match(obj, source, callback=None):
    """Performs a comparison between `obj` and `source` to determine if `obj`
    contains equivalent property values as `source`. If a callback is
    provided it will be executed to compare values. If the callback returns
    ``None``, comparisons will be handled by the method instead. The callback
    is invoked with two arguments: ``(obj, source)``.

    Args:
        obj (list|dict): Object to compare.
        source (list|dict): Object of property values to match.
        callback (mixed, optional): Callback used to compare values from
            `obj` and `source`.

    Returns:
        bool: Whether `obj` is a match or not.

    Example:

        >>> is_match({'a': 1, 'b': 2}, {'b': 2})
        True
        >>> is_match({'a': 1, 'b': 2}, {'b': 3})
        False
        >>> is_match({'a': [{'b': [{'c': 3, 'd': 4}]}]},\
                     {'a': [{'b': [{'d': 4}]}]})
        True

    .. versionadded:: 3.0.0

    .. versionchanged:: 3.2.0
        Don't compare `obj` and `source` using ``type``. Use ``isinstance``
        exclusively.
    """
    # If a callback is provided, use it for comparison.
    equal = callback(obj, source) if callable(callback) else None

    if equal is not None:
        # Return the callback result if it is anything but None.
        pass
    elif (isinstance(obj, dict) and isinstance(source, dict) or
          isinstance(obj, list) and isinstance(source, list) or
          isinstance(obj, tuple) and isinstance(source, tuple)):
        # Walk a/b to determine equality.
        for key, value in iterator(source):
            if pyd.has(obj, key):
                equal = is_match(obj[key], value, callback)
            else:
                equal = False

            if not equal:
                break
    else:
        # Use basic == comparison.
        equal = obj == source

    return equal
def update(
    self,
    docs: Union[List[Dict], Dict],
    key: Union[List, str, None] = None,
    additional_metadata: Union[str, List[str], None] = None,
):
    """
    Update documents into the Store

    Args:
        docs: the document or list of documents to update
        key: field name(s) to determine uniqueness for a document,
            can be a list of multiple fields, a single field,
            or None if the Store's key field is to be used
        additional_metadata: field(s) to include in the gridfs metadata
    """
    if not isinstance(docs, list):
        docs = [docs]

    if isinstance(key, str):
        key = [key]
    elif not key:
        key = [self.key]

    key = list(set(key) - set(files_collection_fields))

    if additional_metadata is None:
        additional_metadata = []
    elif isinstance(additional_metadata, str):
        additional_metadata = [additional_metadata]
    else:
        additional_metadata = list(additional_metadata)

    for d in docs:
        search_doc = {k: d[k] for k in key}

        metadata = {
            k: get(d, k)
            for k in [self.last_updated_field]
            + additional_metadata
            + self.searchable_fields
            if has(d, k)
        }
        metadata.update(search_doc)

        data = json.dumps(jsanitize(d)).encode("UTF-8")
        if self.compression:
            data = zlib.compress(data)
            metadata["compression"] = "zlib"

        self._collection.put(data, metadata=metadata)
        search_doc = self.transform_criteria(search_doc)

        # Clean up old gridfs entries
        for fdoc in (self._files_collection.find(search_doc, ["_id"])
                     .sort("uploadDate", -1).skip(1)):
            self._collection.delete(fdoc["_id"])
def groupby(
    self,
    keys: Union[List[str], str],
    criteria: Optional[Dict] = None,
    properties: Union[Dict, List, None] = None,
    sort: Optional[Dict[str, Union[Sort, int]]] = None,
    skip: int = 0,
    limit: int = 0,
) -> Iterator[Tuple[Dict, List[Dict]]]:
    """
    Simple grouping function that will group documents by keys.

    Args:
        keys: fields to group documents
        criteria: PyMongo filter for documents to search in
        properties: properties to return in grouped documents
        sort: Dictionary of sort order for fields. Keys are field names
            and values are 1 for ascending or -1 for descending.
        skip: number of documents to skip
        limit: limit on total number of documents returned

    Returns:
        generator returning tuples of (key, list of docs)
    """
    pipeline = []
    if isinstance(keys, str):
        keys = [keys]

    if properties is None:
        properties = []
    if isinstance(properties, dict):
        properties = list(properties.keys())

    if criteria is not None:
        pipeline.append({"$match": criteria})

    if len(properties) > 0:
        pipeline.append({"$project": {p: 1 for p in properties + keys}})

    alpha = "abcdefghijklmnopqrstuvwxyz"
    group_id = {letter: f"${key}" for letter, key in zip(alpha, keys)}
    pipeline.append({"$group": {"_id": group_id,
                                "docs": {"$push": "$$ROOT"}}})

    for d in self._collection.aggregate(pipeline, allowDiskUse=True):
        id_doc = {}  # type: Dict[str, Any]
        for letter, key in group_id.items():
            if has(d["_id"], letter):
                set_(id_doc, key[1:], d["_id"][letter])
        yield (id_doc, d["docs"])
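# A small sketch of the letter-keyed _id round trip used by groupby
# above (assumes pydash's set_/has, imported as in the method): grouped
# fields are aliased to letters for $group, then unpacked again.
keys = ['task_id', 'state']
alpha = "abcdefghijklmnopqrstuvwxyz"
group_id = {letter: f"${key}" for letter, key in zip(alpha, keys)}
# group_id == {'a': '$task_id', 'b': '$state'}

mongo_id = {'a': 'mp-1234', 'b': 'done'}  # shape of d["_id"] from $group
id_doc = {}
for letter, key in group_id.items():
    if has(mongo_id, letter):
        set_(id_doc, key[1:], mongo_id[letter])  # key[1:] strips the "$"
assert id_doc == {'task_id': 'mp-1234', 'state': 'done'}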
def is_equal(value, other, callback=None):
    """Performs a comparison between two values to determine if they are
    equivalent to each other. If a callback is provided it will be executed
    to compare values. If the callback returns ``None``, comparisons will be
    handled by the method instead. The callback is invoked with two
    arguments: ``(value, other)``.

    Args:
        value (list|dict): Object to compare.
        other (list|dict): Object to compare.
        callback (mixed, optional): Callback used to compare values from
            `value` and `other`.

    Returns:
        bool: Whether `value` and `other` are equal.

    Example:

        >>> is_equal([1, 2, 3], [1, 2, 3])
        True
        >>> is_equal('a', 'A')
        False
        >>> is_equal('a', 'A', lambda a, b: a.lower() == b.lower())
        True

    .. versionadded:: 1.0.0
    """
    # If a callback is provided, use it for comparison.
    equal = callback(value, other) if callable(callback) else None

    if equal is not None:
        # Return the callback result if it is anything but None.
        pass
    elif (callable(callback) and
          type(value) is type(other) and
          isinstance(value, (list, dict)) and
          isinstance(other, (list, dict)) and
          len(value) == len(other)):
        # Walk a/b to determine equality using the callback.
        # NOTE: iterator(value) is evaluated once, so rebinding `value`
        # inside the loop is safe here.
        for key, value in iterator(value):
            if pyd.has(other, key):
                equal = is_equal(value, other[key], callback)
            else:
                equal = False

            if not equal:
                break
    else:
        # Use basic == comparison.
        equal = value == other

    return equal
def best_match(entities: list):
    best = {'regex_groups': ()}
    for entity in entities:
        if (pydash.has(entity, 'regex_groups') and
                len(entity['regex_groups']) > len(best['regex_groups'])):
            best = entity
    return best if not pydash.is_empty(best['regex_groups']) else None
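# Usage sketch for best_match (illustrative entities): the entity with
# the most captured regex groups wins; None when nothing captured.
entities = [
    {'path': r'/maps/(\w+)', 'regex_groups': ('site1',)},
    {'path': r'/maps/(\w+)/poses/(\w+)', 'regex_groups': ('site1', 'p7')},
    {'path': r'/health'},  # never matched, so no regex_groups key
]
assert best_match(entities)['regex_groups'] == ('site1', 'p7')
assert best_match([{'regex_groups': ()}]) is None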
def bulk_error_process(items):
    # collect every value across all items that carries an 'error' key
    return [
        value
        for item in items
        for value in item.values()  # values() must be called, not referenced
        if _.has(value, 'error')
    ]
def is_equal_with(value, other, customizer):
    """This method is like :func:`is_equal` except that it accepts a
    customizer which is invoked to compare values. If the customizer
    returns ``None``, comparisons will be handled by the method instead.
    The customizer is invoked with two arguments: ``(value, other)``.

    Args:
        value (list|dict): Object to compare.
        other (list|dict): Object to compare.
        customizer (mixed, optional): Customizer used to compare values from
            `value` and `other`.

    Returns:
        bool: Whether `value` and `other` are equal.

    Example:

        >>> is_equal_with([1, 2, 3], [1, 2, 3], None)
        True
        >>> is_equal_with('a', 'A', None)
        False
        >>> is_equal_with('a', 'A', lambda a, b: a.lower() == b.lower())
        True

    .. versionadded:: 4.0.0
    """
    # If a customizer is provided, use it for comparison.
    equal = customizer(value, other) if callable(customizer) else None

    if equal is not None:
        # Return the customizer result if it is anything but None.
        pass
    elif (callable(customizer) and
          type(value) is type(other) and
          isinstance(value, (list, dict)) and
          isinstance(other, (list, dict)) and
          len(value) == len(other)):
        # Walk a/b to determine equality using the customizer.
        for key, value in iterator(value):
            if pyd.has(other, key):
                equal = is_equal_with(value, other[key], customizer)
            else:
                equal = False

            if not equal:
                break
    else:
        # Use basic == comparison.
        equal = value == other

    return equal
def pose_to_mono_pose(map, pose):
    grid_map = pydash.get(map, 'metadata.gridmap')
    if not pydash.has(grid_map, 'origin'):
        raise Exception('metadata.gridmap.origin is undefined')
    if not pydash.has(grid_map, 'scale_m2px'):
        raise Exception('metadata.gridmap.scale_m2px is undefined')
    monochrome = pydash.get(map, 'metadata.monochrome')
    if monochrome is None:
        raise Exception('metadata.monochrome is undefined')
    map_point = {
        'x': grid_map['origin']['x'] + pose['x'] * grid_map['scale_m2px'],
        'y': grid_map['origin']['y'] - pose['y'] * grid_map['scale_m2px']
    }
    return {
        'x': round(map_point['x'] * monochrome['width'] / grid_map['width'],
                   FIXED_DIGITS),
        'y': round(map_point['y'] * monochrome['height'] / grid_map['height'],
                   FIXED_DIGITS)
    }
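# Usage sketch for pose_to_mono_pose with made-up map metadata; assumes
# the module-level FIXED_DIGITS is at least 1.
sample_map = {
    'metadata': {
        'gridmap': {
            'origin': {'x': 200.0, 'y': 200.0},
            'scale_m2px': 20.0,  # 20 px per metre
            'width': 400,
            'height': 400,
        },
        'monochrome': {'width': 100, 'height': 100},
    }
}
# map_point = (200 + 1.5*20, 200 - (-2.0)*20) = (230.0, 240.0), then
# scaled by 100/400 onto the monochrome image -> (57.5, 60.0)
assert pose_to_mono_pose(sample_map, {'x': 1.5, 'y': -2.0}) == \
    {'x': 57.5, 'y': 60.0}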
def __finish_initialization(self):
    """ Fetches remaining data not retrieved during basket initialization """
    if has(self, 'id'):
        if not has(self, '__initial_positions'):
            position_set = GsAssetApi.get_latest_positions(
                self.id, PositionType.ANY)
            position_set = PositionSet.from_target(position_set)
            self.__position_set = position_set
            self.__divisor = get(position_set, 'divisor')
            self.__initial_positions = set(
                deepcopy(self.__position_set.positions))
            set_(self.__initial_state, 'divisor', self.__divisor)
            set_(self.__initial_state, 'position_set', self.__position_set)
        if not has(self.__initial_state, 'initial_price'):
            initial_price = GsIndexApi.initial_price(self.id, dt.date.today())
            self.__initial_price = get(initial_price, 'price')
            set_(self.__initial_state, 'initial_price', self.__initial_price)
        if not has(self.__initial_state, 'publish_to_bloomberg'):
            report = get(self, '__latest_create_report',
                         self.__get_latest_create_report())
            self.__publish_to_bloomberg = get(
                report, 'parameters.publish_to_bloomberg')
            self.__publish_to_factset = get(
                report, 'parameters.publish_to_factset')
            self.__publish_to_reuters = get(
                report, 'parameters.publish_to_reuters')
            set_(self.__initial_state, 'publish_to_bloomberg',
                 self.__publish_to_bloomberg)
            set_(self.__initial_state, 'publish_to_factset',
                 self.__publish_to_factset)
            set_(self.__initial_state, 'publish_to_reuters',
                 self.__publish_to_reuters)
        if not has(self, '__entitlements'):
            self.__entitlements = BasketEntitlements.from_target(
                self.__initial_entitlements)
    self.__set_error_messages()
def check_video_language_matches_first_audio_language(self):
    reply = ""
    if not has(self.mediainfo, 'video.0.language'):
        reply += self._print_report("error", "Video language not set\n")
        return reply
    if not has(self.mediainfo, 'audio.0.language'):
        reply += self._print_report("error", "First audio language not set\n")
        return reply
    if (self.mediainfo['video'][0]['language'] ==
            self.mediainfo['audio'][0]['language']):
        reply += self._print_report(
            "correct", "Video language matches first audio language: `" +
            self.mediainfo['video'][0]['language'] + "`\n")
    else:
        reply += self._print_report(
            "error", "Video language does not match first audio language: `" +
            self.mediainfo['video'][0]['language'] + "` vs `" +
            self.mediainfo['audio'][0]['language'] + "`\n")
    return reply
def test_Objects(self):
    # Get all objects from the adapter
    r = requests.get(
        'http://localhost:' +
        str(globals.config['vicinity_adapter_port']) + '/adapter/objects',
        headers={"Content-Type": "application/json"})
    objects = json.loads(r.text)

    # Validate TDs
    r = requests.post(
        globals.config['neighbourhood_manager_endpoint'] +
        '/api/repository/validate',
        data=json.dumps(objects),
        headers={"Content-Type": "application/json"})
    output = json.loads(r.text)
    print(json.dumps(output, indent=2))

    # True where a validation result carries an 'errors' key
    has_errors = py_.map_(output['data'], lambda x: py_.has(x, 'errors'))
    self.assertTrue(
        False in has_errors,
        'Some error(s) have been triggered during the validation process')
async def remove_status_codes(status_codes: list, _id: str, db: AioRedis):
    """
    removes status codes from an endpoint cache

    @param status_codes: (list) status codes to remove
    @param _id: (str) id of endpoint cache
    @param db: db connection
    """
    endpoint_cache = await db.hgetall(_id, encoding='utf-8')
    if not pydash.has(endpoint_cache, 'response_codes'):
        raise Exception({
            'message': f'Unable to update cache {_id}',
            'status_code': 400
        })
    for status_code in status_codes:
        await db.srem(endpoint_cache['response_codes'], status_code)
def handle(err: Exception) -> web.Response:
    """
    handles exceptions in controllers

    @param err: (Exception) error to handle
    @returns: json response
    """
    err_ctx = err.args[0]
    if pydash.has(err_ctx, 'status_code'):
        return web.json_response(err_ctx, status=err_ctx['status_code'])
    return web.json_response({
        'message': err_ctx,
        'status_code': 500
    }, status=500)
def get_many_assets_data_scroll(cls,
                                scroll: str = '1m',
                                fields: IdList = None,
                                as_of: dt.datetime = None,
                                limit: int = None,
                                **kwargs) -> dict:
    query = cls.__create_query(fields, as_of, limit, scroll, **kwargs)
    response = GsSession.current._post('/assets/data/query', payload=query)
    results = get(response, 'results')
    # keep paging while the response carries a scrollId and a non-empty page
    while has(response, 'scrollId') and len(get(response, 'results')):
        query = cls.__create_query(fields, as_of, limit, scroll,
                                   get(response, 'scrollId'), **kwargs)
        response = GsSession.current._post('/assets/data/query',
                                           payload=query)
        results += get(response, 'results')
    return results
def __set_error_messages(self):
    """ Errors to check for based on current user/basket state """
    if len(get(self, '__error_messages', [])) > 0:
        return
    errors = []
    user_tokens = get(GsUsersApi.get_current_user_info(), 'tokens', [])
    if 'internal' not in user_tokens:
        errors.append(ErrorMessage.NON_INTERNAL)
    if not has(self, 'id'):
        errors.append(ErrorMessage.UNINITIALIZED)
    else:
        errors.append(ErrorMessage.UNMODIFIABLE)
    tokens = set(get(self.__initial_entitlements, 'admin', []))
    if not any(t in user_tokens for t in tokens):
        errors.append(ErrorMessage.NON_ADMIN)
    self.__error_messages = set(errors)
async def _populate(self, l_docs: list, pop_path: PopulatePath):
    if pop_path.let:
        # pre-process fields
        def _t(doc, k, expr):
            v = expr(doc)
            if v:
                doc = _.set_(doc, k, v)
            return doc

        # apply every `let` expression to every doc (the original nested
        # comprehension duplicated docs when `let` had more than one entry)
        for k, expr in pop_path.let.items():
            l_docs = [_t(doc, k, expr) for doc in l_docs]

    if pop_path.from_ not in await db.list_collection_names():
        raise Exception(f'Collection {pop_path.from_} does not exist')

    l_keys = set(
        _.get(doc, pop_path.local_field)
        for doc in l_docs
        if _.has(doc, pop_path.local_field)
    )
    if not l_keys:
        return l_docs

    f_docs = {
        doc.get('_id'): doc
        async for doc in pop_path.foreign_collection.find(
            filter={
                **pop_path.filter_,
                pop_path.foreign_field: {'$in': list(l_keys)}
            },
            projection=pop_path.projection)
    }
    del l_keys  # free resources

    for doc in l_docs:
        _k = _.get(doc, pop_path.local_field)
        if _k in f_docs:
            doc.update(_.set_({}, pop_path.as_, f_docs.get(_k)))
    return l_docs
def post(self):
    valid = aggregateValidate().validate()
    if valid:
        try:
            pipeline = json.loads(valid['pipeline'])
            entity = valid['entity']
        except Exception as error:
            return FactoryInvalid.responseInvalid({'msg': str(error)}, 422)

        # raw string keeps the pydash escaped-dot path intact
        if not has(pipeline, r'[0].$match.roles\._id'):
            return FactoryInvalid.responseInvalid(
                {'msg': 'Pipeline must include a $match on roles._id '
                        '($match: {"roles._id": ...})'}, 422)

        args = map_values_deep(pipeline, updaterIds)
        return {'items': Aggregate(entity).pipeline(args)}
def _conforms(obj):
    for key, predicate in iterator(source):
        if not pyd.has(obj, key) or not predicate(obj[key]):
            return False
    return True
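# Context sketch: _conforms closes over `source`, a mapping of key ->
# predicate, as in pydash's conforms/conforms_to. The public API behaves
# like this:
import pydash as pyd

assert pyd.conforms_to({'b': 2}, {'b': lambda n: n > 1})
assert not pyd.conforms_to({'b': 0}, {'b': lambda n: n > 1})
assert not pyd.conforms_to({'a': 2}, {'b': lambda n: n > 1})  # key missing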
def test_has(case, expected):
    assert _.has(*case) == expected
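# The bare test above is presumably driven by pytest parametrization; a
# hedged sketch of the kind of (case, expected) pairs it would receive:
import pytest
import pydash as _

@pytest.mark.parametrize('case,expected', [
    (({'a': {'b': 1}}, 'a.b'), True),   # deep path present
    (({'a': {'b': 1}}, 'a.c'), False),  # deep path missing
    (([1, 2, 3], 2), True),             # list index in range
    (([1, 2, 3], 5), False),            # list index out of range
])
def test_has_sketch(case, expected):
    assert _.has(*case) == expected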
def sort_by_all(collection, keys, orders=None, reverse=False):
    """This method is like :func:`sort_by` except that it sorts by key names
    instead of an iteratee function. Keys can be sorted in descending order by
    prepending a ``"-"`` to the key name (e.g. ``"name"`` would become
    ``"-name"``) or by passing a list of boolean sort options via `orders`
    where ``True`` is ascending and ``False`` is descending.

    Args:
        collection (list|dict): Collection to iterate over.
        keys (list): List of keys to sort by. By default, keys will be sorted
            in ascending order. To sort a key in descending order, prepend a
            ``"-"`` to the key name. For example, to sort the key value for
            ``"name"`` in descending order, use ``"-name"``.
        orders (list, optional): List of boolean sort orders to apply for
            each key. ``True`` corresponds to ascending order while ``False``
            is descending. Defaults to ``None``.
        reverse (bool, optional): Whether to reverse the sort. Defaults to
            ``False``.

    Returns:
        list: Sorted list.

    Example:

        >>> items = [{'a': 2, 'b': 1}, {'a': 3, 'b': 2}, {'a': 1, 'b': 3}]
        >>> results = sort_by_all(items, ['b', 'a'])
        >>> assert results == [{'a': 2, 'b': 1},\
                               {'a': 3, 'b': 2},\
                               {'a': 1, 'b': 3}]
        >>> results = sort_by_all(items, ['a', 'b'])
        >>> assert results == [{'a': 1, 'b': 3},\
                               {'a': 2, 'b': 1},\
                               {'a': 3, 'b': 2}]
        >>> results = sort_by_all(items, ['-a', 'b'])
        >>> assert results == [{'a': 3, 'b': 2},\
                               {'a': 2, 'b': 1},\
                               {'a': 1, 'b': 3}]
        >>> results = sort_by_all(items, ['a', 'b'], [False, True])
        >>> assert results == [{'a': 3, 'b': 2},\
                               {'a': 2, 'b': 1},\
                               {'a': 1, 'b': 3}]

    See Also:
        - :func:`sort_by_all` (main definition)
        - :func:`sort_by_order` (alias)

    .. versionadded:: 3.0.0

    .. versionchanged:: 3.2.0
        Added `orders` argument.

    .. versionchanged:: 3.2.0
        Added :func:`sort_by_order` as alias.
    """
    if isinstance(collection, dict):
        collection = collection.values()

    # Maintain backwards compatibility.
    if pyd.is_bool(orders):
        reverse = orders
        orders = None

    comparers = []

    if orders:
        for i, key in enumerate(keys):
            if pyd.has(orders, i):
                order = 1 if orders[i] else -1
            else:
                order = 1

            comparers.append((pyd.deep_prop(key), order))
    else:
        for key in keys:
            if key.startswith('-'):
                order = -1
                key = key[1:]
            else:
                order = 1

            comparers.append((pyd.deep_prop(key), order))

    def comparison(left, right):
        # pylint: disable=useless-else-on-loop,missing-docstring
        for func, mult in comparers:
            result = _cmp(func(left), func(right))
            if result:
                return mult * result
        else:
            return 0

    return sorted(collection, key=cmp_to_key(comparison), reverse=reverse)