def __init__(self, object, format='TEXT', _hsid=0, _hflags=0):
    SON.__init__(self)
    self.hdr = Hdr(144, _hsid, 0, _hflags)
    self.type = 144
    self['t'] = 144
    self['object'] = object
    self['format'] = format
def specific_config_gen(IC,args): IC.base_dir = args['base_dir'] IC.annotate_dir = args['annotate_dir'] IC.groundtruth_dir = args['groundtruth_dir'] IC.correspondence = tb.tabarray(SVfile = args['frame_correspondence']) IC.size = args['size'] IC.prefix = prefix = args.get('image_extension','.jpg') IC.current_frame_path = None csvs = [x for x in os.listdir(IC.annotate_dir) if x.endswith('.csv')] csvs.sort() Xs = [tb.tabarray(SVfile = os.path.join(IC.annotate_dir,csv)) for csv in csvs] cns = [csv.split('.')[0] for csv in csvs] cns = [[cn]*len(X) for (cn,X) in zip(cns,Xs)] Xs = [X.addcols(cn,names=['clip_num']) for (cn,X) in zip(cns,Xs)] csvs = [x for x in os.listdir(IC.groundtruth_dir) if x.endswith('.csv')] csvs.sort() Gs = [] fields = ['clip_num','Frame'] + xfields + yfields for ind,csv in enumerate(csvs): try: g = tb.tabarray(SVfile = os.path.join(IC.groundtruth_dir,csv)) except: x = Xs[ind].addcols([-1]*len(Xs[ind]),names=['Correctness']) else: g = g.addcols([csv.split('.')[0]]*len(g),names = ['clip_num']) g = g[fields + ['Confidence']] g.renamecol('Confidence','Correctness') x = Xs[ind].join(g,keycols=fields) Gs.append(x) X = tb.tab_rowstack(Gs) X.sort(order=['clip_num','Frame']) Y = IC.correspondence F = tb.fast.recarrayisin(Y[['clip_num','Frame']],X[['clip_num','Frame']]) Y = Y[F] X = X.join(Y,keycols=['clip_num','Frame']) params = [] for t in X: print(t) cn = t['clip_num'] fr = t['Frame'] box = get_darpa_box(t) bb = box.pop('box') xc,yc = bb.center center = correct_center((xc,yc),IC.size,(1920,1080)) bb_new = bbox.BoundingBox(center = center,width = IC.size[0], height = IC.size[1]) p = SON([('size',IC.size), ('bounding_box',SON([('xfields',list(bb_new.xs)),('yfields',list(bb_new.ys))])), ('original_bounding_box',SON([('xfields',list(bb.xs)),('yfields',list(bb.ys))])), ('clip_num',cn), ('Frame',int(t['Original'])), ('base_dir',IC.base_dir), ('correctness',int(t['Correctness']))]) p.update(box) p['GuessObjectType'] = p['ObjectType'] p['ObjectType'] = p['ObjectType'] if t['Correctness'] == 1 else '' params.append(SON([('image',p)])) return params
def __init__(self, text, format='TEXT', _hsid=0, _hflags=0):
    SON.__init__(self)
    self.hdr = Hdr(140, _hsid, 0, _hflags)
    self.type = 140
    self['t'] = 140
    self['text'] = text
    self['format'] = format
def __init__(self, text, format, completions, _hsid=0, _hflags=0):
    SON.__init__(self)
    self.hdr = Hdr(141, _hsid, 0, _hflags)
    self.type = 141
    self['t'] = 141
    self['text'] = text
    self['format'] = format
    self['completions'] = completions
def __init__(self, object, format, obj_found=False, doc=None, _hsid=0, _hflags=0):
    SON.__init__(self)
    self.hdr = Hdr(143, _hsid, 0, _hflags)
    self.type = 143
    self['t'] = 143
    self['object'] = object
    self['format'] = format
    self['obj_found'] = obj_found
    self['doc'] = doc
def __init__(self, object, format, obj_found=False, source=None, _hsid=0, _hflags=0):
    SON.__init__(self)
    self.hdr = Hdr(145, _hsid, 0, _hflags)
    self.type = 145
    self['t'] = 145
    self['object'] = object
    self['format'] = format
    self['obj_found'] = obj_found
    self['source'] = source
def to_mongo(self, use_db_field=True, fields=None): """ Return as SON data ready for use with MongoDB. """ if not fields: fields = [] data = SON() data['_id'] = None data['_cls'] = self._class_name # only root fields ['test1.a', 'test2'] => ['test1', 'test2'] root_fields = {f.split('.')[0] for f in fields} for field_name in self: if root_fields and field_name not in root_fields: continue value = self._data.get(field_name, None) field = self._fields.get(field_name) if field is None and self._dynamic: field = self._dynamic_fields.get(field_name) if value is not None: f_inputs = field.to_mongo.__code__.co_varnames ex_vars = {} if fields and 'fields' in f_inputs: key = '%s.' % field_name embedded_fields = [ i.replace(key, '') for i in fields if i.startswith(key)] ex_vars['fields'] = embedded_fields if 'use_db_field' in f_inputs: ex_vars['use_db_field'] = use_db_field value = field.to_mongo(value, **ex_vars) # Handle self generating fields if value is None and field._auto_gen: value = field.generate() self._data[field_name] = value if (value is not None) or (field.null): if use_db_field: data[field.db_field] = value else: data[field.name] = value # Only add _cls if allow_inheritance is True if not self._meta.get('allow_inheritance'): data.pop('_cls') return data
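# Hedged usage sketch for the to_mongo() serializer above, assuming a
# mongoengine-style Document. The `Person` class and its fields are
# hypothetical, not taken from the original source; the point is only that
# to_mongo() yields an ordered SON whose keys follow the declared field
# order and respect db_field aliases.
from mongoengine import Document, StringField, IntField

class Person(Document):
    name = StringField(db_field='n')
    age = IntField()

p = Person(name='Ada', age=36)
son_doc = p.to_mongo()      # ordered SON; keys use db_field aliases ('n' for name)
plain = son_doc.to_dict()   # convert to a regular dict once key order no longer matters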
def __init__(self, stderr, stack=None, etype=None, value=None, syntax=None, _hsid=0, _hflags=0):
    SON.__init__(self)
    self.hdr = Hdr(10, _hsid, 0, _hflags)
    self.type = 10
    self['t'] = 10
    self['stderr'] = stderr
    self['stack'] = stack
    self['etype'] = etype
    self['value'] = value
    self['syntax'] = syntax
def command(self, command, value=1, read_preference=None, callback=None,
            check=True, allowable_errors=[], connection=None, **kwargs):
    """Issue a MongoDB command.

    Send command `command` to the database and return the response. If
    `command` is an instance of :class:`basestring` then the command
    {`command`: `value`} will be sent. Otherwise, `command` must be an
    instance of :class:`dict` and will be sent as is.

    Any additional keyword arguments will be added to the final command
    document before it is sent.

    For example, a command like ``{buildinfo: 1}`` can be sent using:

    >>> db.command("buildinfo")

    For a command where the value matters, like ``{collstats: collection_name}``
    we can do:

    >>> db.command("collstats", collection_name)

    For commands that take additional arguments we can use kwargs. So
    ``{filemd5: object_id, root: file_root}`` becomes:

    >>> db.command("filemd5", object_id, root=file_root)

    :Parameters:
      - `command`: document representing the command to be issued, or the
        name of the command (for simple commands only).

        .. note:: the order of keys in the `command` document is significant
           (the "verb" must come first), so commands which require multiple
           keys (e.g. `findandmodify`) should use an instance of
           :class:`~bson.son.SON` or a string and kwargs instead of a Python
           `dict`.

      - `value` (optional): value to use for the command verb when `command`
        is passed as a string
      - `**kwargs` (optional): additional keyword arguments will be added to
        the command document before it is sent
    """
    if isinstance(command, basestring):
        command = SON([(command, value)])

    command.update(kwargs)

    if read_preference is None:
        read_preference = self._read_preference

    Client(self, '$cmd').find_one(command,
                                  is_command=True,
                                  connection=connection,
                                  read_preference=read_preference,
                                  callback=callback)
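# Hedged usage sketch for the command() wrapper above. It illustrates why a
# multi-key command needs bson.SON: the command verb must be the first key.
# The collection and field names ('users', 'counter') are made up for the
# example and are not part of the original source.
from bson import SON

def bump_counter(db, callback):
    cmd = SON([('findandmodify', 'users'),            # verb first
               ('query', {'name': 'ada'}),
               ('update', {'$inc': {'counter': 1}}),
               ('new', True)])
    db.command(cmd, callback=callback)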
def near(col, params):
    if params is None or params.count(':') != 2:
        raise TypeError("$near requires three arguments. Use like /%s/$near=-73.10:42.18:0.5/ "
                        "to return all records within a 0.5-mile radius of %s" % (col, col))
    params = params.split(":")
    params[0] = float(params[0])
    params[1] = float(params[1])
    params[2] = float(params[2]) / 69.0  # convert miles to degrees (roughly 69 miles per degree)
    near_dict = {"$near": [params[0], params[1]]}
    dist_dict = {"$maxDistance": params[2]}
    q = SON(near_dict)
    q.update(dist_dict)
    return q
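# Hedged usage sketch for near() above: the returned SON is meant to be used
# as the value of the geo-indexed field in a find() query. The field name
# ('loc'), the collection object, and the presence of a 2d index are
# assumptions for illustration.
geo_q = near('loc', '-73.10:42.18:0.5')
cursor = coll.find({'loc': geo_q})   # documents within ~0.5 miles of (-73.10, 42.18)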
def __init__(self, source, cid=0, echo_stdin=True, displayhook='LAST', assignhook='NONE',
             print_ast=False, except_msg=False, _hsid=0, _hflags=0):
    SON.__init__(self)
    self.hdr = Hdr(120, _hsid, 0, _hflags)
    self.type = 120
    self['t'] = 120
    self['source'] = source
    self['cid'] = cid
    self['echo_stdin'] = echo_stdin
    self['displayhook'] = displayhook
    self['assignhook'] = assignhook
    self['print_ast'] = print_ast
    self['except_msg'] = except_msg
def generate_splits(task_config, hash, colname):
    base_query = SON([('__hash__', hash)])
    ntrain = task_config['ntrain']
    ntest = task_config['ntest']
    ntrain_pos = task_config.get('ntrain_pos')
    ntest_pos = task_config.get('ntest_pos')
    N = task_config.get('N', 10)
    query = task_config['query']
    base_query.update(reach_in('config', task_config.get('universe', SON([]))))
    cquery = reach_in('config', query)
    print('q', cquery)
    print('u', base_query)
    return traintest.generate_split2(DB_NAME, colname, cquery, N, ntrain, ntest,
                                     ntrain_pos=ntrain_pos, ntest_pos=ntest_pos,
                                     universe=base_query, use_negate=True)
def put_in_performance(split_results, image_config_gen, m, model_hash, image_hash, perf_coll, task, ext_hash):
    model_results = SON([])
    for stat in STATS:
        if stat in split_results[0] and split_results[0][stat] is not None:
            model_results[stat] = sp.array([split_result[stat] for split_result in split_results]).mean()
    out_record = SON([('model', m['config']['model']),
                      ('model_hash', model_hash),
                      ('model_filename', m['filename']),
                      ('images', son_escape(image_config_gen['images'])),
                      ('image_hash', image_hash),
                      ('task', son_escape(task)),
                      ('__hash__', ext_hash)])
    out_record.update(model_results)
    perf_coll.insert(out_record)
def put_in_split_result(res, image_config_gen, m, task, ext_hash, split_id, splitres_fs):
    out_record = SON([('model', m['config']['model']),
                      ('images', son_escape(image_config_gen['images'])),
                      ('task', son_escape(task)),
                      ('split_id', split_id)])
    split_result = SON([])
    for stat in STATS:
        if stat in res and res[stat] is not None:
            split_result[stat] = res[stat]
    filename = get_filename(out_record)
    out_record['filename'] = filename
    out_record['__hash__'] = ext_hash
    out_record.update(split_result)
    print('dump out split result...')
    out_data = cPickle.dumps(SON([('split_result', res)]))
    splitres_fs.put(out_data, **out_record)
def to_mongo(self): from bson import SON from mongoengine import Document from mongoengine.base.common import ALLOW_INHERITANCE """Return as SON data ready for use with MongoDB. """ data = SON() data["_id"] = None data['_cls'] = self._class_name for field_name in self._fields_ordered: value = self._data.get(field_name, None) field = self._fields.get(field_name) if field is None and self._dynamic: field = self._dynamic_fields.get(field_name) if value is not None: value = field.to_mongo(value) # Handle self generating fields if value is None and field._auto_gen: value = field.generate() self._data[field_name] = value if value is not None: data[field.db_field] = value # If "_id" has not been set, then try and set it if isinstance(self, Document): if data["_id"] is None: data["_id"] = self._data.get("id", None) if data['_id'] is None: data.pop('_id') # Only add _cls if allow_inheritance is True if (not hasattr(self, '_meta') or not self._meta.get('allow_inheritance', ALLOW_INHERITANCE)): data.pop('_cls') return data
def MRsimple(collection, FunMap, FunReduce=None, query={}, out={"replace": 'mr_tmp'}, finalize=None, scope={}, sort=None, jsMode=False, verbose=1): """ simplified generic Map Reduce see: http://docs.mongodb.org/manual/reference/method/db.collection.mapReduce/ returns (MR response object, results collection or results list if out={"inline":1}) Reduce function defaults to one that increments value count optimize by sorting on emit fields see: http://edgystuff.tumblr.com/post/7624019777/optimizing-map-reduce-with-mongodb docs.mongodb.org/manual/reference/method/db.collection.mapReduce/#db.collection.mapReduce sort i.e: sort= { "_id":1 } jsMode should be False if we expect more than 500K dictinct ids """ if len(out.viewkeys()) > 1: command = MRCommand_(out) out = SON([(command, out[command]), ('db', out.get('db')), ('nonAtomic', out.get('nonAtomic', False))]) #nonAtomic not allowed on replace FunMap = Code(FunMap, {}) if FunReduce is None: FunReduce = u"""function (key, values) {var total = 0; for (var i = 0; i < values.length; i++) { total += values[i]; } return total;} """ FunReduce = Code(FunReduce, {}) if verbose > 2: print "Start MRsimple collection = %s"\ "query = %s\nMap=\n%s\nReduce=\n%s\nFinalize=%s\nscope=%s sort=%s" \ % tuple(map(str, (out, query, FunMap, FunReduce, finalize, scope, sort))) if sort: sort = SON(sort) r = collection.map_reduce(FunMap, FunReduce, out=out, query=query, finalize=finalize, scope=scope, sort=sort, full_response=True) if verbose > 1: print "End MRsimple collection=%s, query=%s\nresulsts=\n %s"\ % (collection.name, str(query), str(r)) if 'db' in out.viewkeys(): #@note: can be dict or SON, either way it has property viewkeys results = collection.database.connection[r['result']['db']][r['result']['collection']] else: results = r['results'] if out.keys()[0] == 'inline' else collection.database[r['result']] #@note: results is a list if inline else a collection return r, results
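# Hedged usage sketch for MRsimple() above (Python 2, like the helper itself).
# The collection (db.items) and the 'category' field are assumptions for
# illustration: this counts documents per category, relying on the default
# reduce function that sums the emitted values.
fun_map = u"""function () { emit(this.category, 1); }"""
r, results = MRsimple(db.items, fun_map, query={"active": True},
                      out={"inline": 1}, sort={"category": 1})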
def get_last_error(self, db, **options):
    command = SON([("getlasterror", 1)])
    db = "%s.$cmd" % db.split('.', 1)[0]
    command.update(options)

    query = Query(collection=db, query=command)
    reply = yield self.send_QUERY(query)

    assert len(reply.documents) == 1
    document = reply.documents[0].decode()
    err = document.get("err", None)
    code = document.get("code", None)

    if err is not None:
        if code == 11000:
            raise DuplicateKeyError(err, code=code)
        else:
            raise OperationFailure(err, code=code)

    defer.returnValue(document)
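# Hedged usage sketch for get_last_error() above (a txmongo-style protocol
# method used inside an inlineCallbacks coroutine). The protocol instance
# `proto`, the database name, and the write-concern options are assumptions.
from twisted.internet import defer

@defer.inlineCallbacks
def confirm_write(proto):
    doc = yield proto.get_last_error('mydb', w=1, wtimeout=1000)
    defer.returnValue(doc)   # the raw getlasterror reply document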
def write_outcerts(func, configs, incertdicts, outcertpaths, db):
    if incertdicts:
        old_param_names = dict_union([op["param_names"] for op in incertdicts])
    else:
        old_param_names = SON([])
    new_param_names = uniqify(ListUnion([x.keys() for x in configs]))
    for (outcertpath, outroot) in zip(outcertpaths, func.outroots):
        param_names = old_param_names.copy()
        param_names[outroot] = new_param_names
        remove_incorrect(db, outroot, func.outconfig_string, func.outrun_hash)
        createCertificateDict(
            outcertpath,
            {
                "run_hash": func.outrun_hash,
                "db": func.dbname,
                "out_args": func.out_args,
                "root": outroot,
                "config_hash": func.outconfig_string,
                "param_names": param_names,
            },
        )
def to_dict_document(self, serializer=None): data = SON() data["_id"] = None # data['_cls'] = self._class_name for field_name in self: value = self._data.get(field_name, None) field = self._fields.get(field_name) if field is None and self._dynamic: field = self._dynamic_fields.get(field_name) if value and isinstance(field, (ResourceField, ResourceIntField, DynamicResourceField)): pass # value = value.to_dict(fields=field.fields) if value is not None: value = field.to_dict(value, serializer=serializer) # Handle self generating fields if value is None and field._auto_gen: value = field.generate() self._data[field_name] = value if value is not None: data[field.db_field] = value # If "_id" has not been set, then try and set it Document = _import_class("Document") if isinstance(self, Document): if data["_id"] is None: data["_id"] = self._data.get("id", None) if data['_id'] is None: data.pop('_id') # Only add _cls if allow_inheritance is True # if (not hasattr(self, '_meta') or # not self._meta.get('allow_inheritance', ALLOW_INHERITANCE)): # data.pop('_cls') return data
def get_op_gen(op, oplist): if op.get("outcertpaths") is None: func = op["func"] params = op.get("params") inroots = func.inroots outroots = func.outroots if func.action_name == "inject": args = op["params"] out_args = SON([(outroot, params) for outroot in outroots]) else: params = op.get("params", SON([])) parents = [] for ir in inroots: try: parent = [op0 for op0 in oplist if ir in op0["func"].outroots][0] except IndexError: raise IndexError, "No parent found for at least one collection in " + repr(op0["func"].outroots) else: parents.append(parent) for parent in parents: get_op_gen(parent, oplist) in_args = [parent["out_args"] for parent in parents] op["incertpaths"] = [ get_cert_path(func.dbname, inroot, get_config_string(in_arg)) for (inroot, in_arg) in zip(inroots, in_args) ] out_args = dict_union(in_args) out_args.update(params) op["out_args"] = out_args op["outcertpaths"] = [ get_cert_path(func.dbname, outroot, get_config_string(out_args)) for outroot in func.outroots ]
def oid_date_range_filter(dt_from=None, dt_upto=None, field_name='_id'):
    """Construct a range query useful for querying an ObjectId field by date.

    :Parameters:
        - dt_from (datetime or tuple): starting datetime; if a tuple, a
          datetime is constructed from it
        - dt_upto (datetime or tuple): end datetime; if a tuple, a datetime
          is constructed from it
        - field_name (str): optional, defaults to '_id'; the field to query,
          or None. If None, only the range document is returned, otherwise
          the full query is returned.

    :Returns:
        - range query; because ObjectIds built with ``ObjectId.from_datetime``
          have zeroed machine/counter bytes, ``$gte`` includes dt_from while
          ``$lte`` effectively excludes documents created at or after dt_upto
    """
    def dt(dt_or_tuple):
        if isinstance(dt_or_tuple, datetime):
            return dt_or_tuple
        elif isinstance(dt_or_tuple, tuple):
            return datetime(*dt_or_tuple)
        else:
            raise TypeError('dt must be a date or tuple')
    q = SON()
    if dt_from is not None:
        q.update(SON([('$gte', ObjectId.from_datetime(dt(dt_from)))]))
    if dt_upto is not None:
        q.update(SON([('$lte', ObjectId.from_datetime(dt(dt_upto)))]))
    return q if field_name is None else SON([(field_name, q)])
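# Hedged usage sketch for oid_date_range_filter() above. The collection name
# (db.events) is an assumption; the query matches documents whose _id was
# generated between 2021-01-01 (inclusive) and 2021-02-01.
q = oid_date_range_filter(dt_from=(2021, 1, 1), dt_upto=(2021, 2, 1))
docs = db.events.find(q)   # {'_id': {'$gte': ObjectId(...), '$lte': ObjectId(...)}}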
def train_test_loop(outfile,extract_creates,task_config,feature_config_path,hash): feature_config = get_config(feature_config_path) base_query = SON([('__config_hash__',hash)]) image_params = SON([('image',feature_config['image'])]) models_params = feature_config['models'] ntrain = task_config['ntrain'] ntest = task_config['ntest'] ntrain_pos = task_config.get('ntrain_pos') N = task_config.get('N',10) query = task_config['query'] base_query.update(reach_in('config',task_config.get('universe',SON([])))) print('\n') print('BASE',base_query) print('\n') conn = pm.Connection(document_class=SON) db = conn['v1'] fs = gridfs.GridFS(db, collection = 'model_performance') cquery = reach_in('config',query) for m in models_params: base_query_copy = base_query.copy() base_query_copy.update(reach_in('config.model',m)) splitdata, results = train_test(cquery,'v1','features',ntrain,ntest,ntrain_pos=ntrain_pos,N=N,universe=base_query_copy) splitpickle = cPickle.dumps(splitdata) data = SON([('feature_config_path',feature_config_path), ('model',m), ('task',son_escape(task_config)), ('image__aggregate__',son_escape(feature_config['image']))]) filename = get_filename(data) data.update(results) data['filename'] = filename fs.put(splitpickle,**data) createCertificateDict(outfile,{'task_config':task_config,'feature_config':feature_config,'feature_config_path':feature_config_path})
class BaseDocument(object): # TODO simplify how `_changed_fields` is used. # Currently, handling of `_changed_fields` seems unnecessarily convoluted: # 1. `BaseDocument` defines `_changed_fields` in its `__slots__`, yet it's # not setting it to `[]` (or any other value) in `__init__`. # 2. `EmbeddedDocument` sets `_changed_fields` to `[]` it its overloaded # `__init__`. # 3. `Document` does NOT set `_changed_fields` upon initialization. The # field is primarily set via `_from_son` or `_clear_changed_fields`, # though there are also other methods that manipulate it. # 4. The codebase is littered with `hasattr` calls for `_changed_fields`. __slots__ = ( "_changed_fields", "_initialised", "_created", "_data", "_dynamic_fields", "_auto_id_field", "_db_field_map", "__weakref__", ) _dynamic = False _dynamic_lock = True STRICT = False def __init__(self, *args, **values): """ Initialise a document or an embedded document. :param values: A dictionary of keys and values for the document. It may contain additional reserved keywords, e.g. "__auto_convert". :param __auto_convert: If True, supplied values will be converted to Python-type values via each field's `to_python` method. :param __only_fields: A set of fields that have been loaded for this document. Empty if all fields have been loaded. :param _created: Indicates whether this is a brand new document or whether it's already been persisted before. Defaults to true. """ self._initialised = False self._created = True if args: raise TypeError( "Instantiating a document with positional arguments is not " "supported. Please use `field_name=value` keyword arguments." ) __auto_convert = values.pop("__auto_convert", True) __only_fields = set(values.pop("__only_fields", values)) _created = values.pop("_created", True) signals.pre_init.send(self.__class__, document=self, values=values) # Check if there are undefined fields supplied to the constructor, # if so raise an Exception. if not self._dynamic and (self._meta.get("strict", True) or _created): _undefined_fields = set(values.keys()) - set( list(self._fields.keys()) + ["id", "pk", "_cls", "_text_score"] ) if _undefined_fields: msg = ('The fields "{0}" do not exist on the document "{1}"').format( _undefined_fields, self._class_name ) raise FieldDoesNotExist(msg) if self.STRICT and not self._dynamic: self._data = StrictDict.create(allowed_keys=self._fields_ordered)() else: self._data = {} self._dynamic_fields = SON() # Assign default values to the instance. # We set default values only for fields loaded from DB. See # https://github.com/mongoengine/mongoengine/issues/399 for more info. 
for key, field in iteritems(self._fields): if self._db_field_map.get(key, key) in __only_fields: continue value = getattr(self, key, None) setattr(self, key, value) if "_cls" not in values: self._cls = self._class_name # Set passed values after initialisation if self._dynamic: dynamic_data = {} for key, value in iteritems(values): if key in self._fields or key == "_id": setattr(self, key, value) else: dynamic_data[key] = value else: FileField = _import_class("FileField") for key, value in iteritems(values): key = self._reverse_db_field_map.get(key, key) if key in self._fields or key in ("id", "pk", "_cls"): if __auto_convert and value is not None: field = self._fields.get(key) if field and not isinstance(field, FileField): value = field.to_python(value) setattr(self, key, value) else: self._data[key] = value # Set any get_<field>_display methods self.__set_field_display() if self._dynamic: self._dynamic_lock = False for key, value in iteritems(dynamic_data): setattr(self, key, value) # Flag initialised self._initialised = True self._created = _created signals.post_init.send(self.__class__, document=self) def __delattr__(self, *args, **kwargs): """Handle deletions of fields""" field_name = args[0] if field_name in self._fields: default = self._fields[field_name].default if callable(default): default = default() setattr(self, field_name, default) else: super(BaseDocument, self).__delattr__(*args, **kwargs) def __setattr__(self, name, value): # Handle dynamic data only if an initialised dynamic document if self._dynamic and not self._dynamic_lock: if not hasattr(self, name) and not name.startswith("_"): DynamicField = _import_class("DynamicField") field = DynamicField(db_field=name, null=True) field.name = name self._dynamic_fields[name] = field self._fields_ordered += (name,) if not name.startswith("_"): value = self.__expand_dynamic_values(name, value) # Handle marking data as changed if name in self._dynamic_fields: self._data[name] = value if hasattr(self, "_changed_fields"): self._mark_as_changed(name) try: self__created = self._created except AttributeError: self__created = True if ( self._is_document and not self__created and name in self._meta.get("shard_key", tuple()) and self._data.get(name) != value ): msg = "Shard Keys are immutable. 
Tried to update %s" % name raise OperationError(msg) try: self__initialised = self._initialised except AttributeError: self__initialised = False # Check if the user has created a new instance of a class if ( self._is_document and self__initialised and self__created and name == self._meta.get("id_field") ): super(BaseDocument, self).__setattr__("_created", False) super(BaseDocument, self).__setattr__(name, value) def __getstate__(self): data = {} for k in ( "_changed_fields", "_initialised", "_created", "_dynamic_fields", "_fields_ordered", ): if hasattr(self, k): data[k] = getattr(self, k) data["_data"] = self.to_mongo() return data def __setstate__(self, data): if isinstance(data["_data"], SON): data["_data"] = self.__class__._from_son(data["_data"])._data for k in ( "_changed_fields", "_initialised", "_created", "_data", "_dynamic_fields", ): if k in data: setattr(self, k, data[k]) if "_fields_ordered" in data: if self._dynamic: setattr(self, "_fields_ordered", data["_fields_ordered"]) else: _super_fields_ordered = type(self)._fields_ordered setattr(self, "_fields_ordered", _super_fields_ordered) dynamic_fields = data.get("_dynamic_fields") or SON() for k in dynamic_fields.keys(): setattr(self, k, data["_data"].get(k)) def __iter__(self): return iter(self._fields_ordered) def __getitem__(self, name): """Dictionary-style field access, return a field's value if present. """ try: if name in self._fields_ordered: return getattr(self, name) except AttributeError: pass raise KeyError(name) def __setitem__(self, name, value): """Dictionary-style field access, set a field's value. """ # Ensure that the field exists before settings its value if not self._dynamic and name not in self._fields: raise KeyError(name) return setattr(self, name, value) def __contains__(self, name): try: val = getattr(self, name) return val is not None except AttributeError: return False def __len__(self): return len(self._data) def __repr__(self): try: u = self.__str__() except (UnicodeEncodeError, UnicodeDecodeError): u = "[Bad Unicode data]" repr_type = str if u is None else type(u) return repr_type("<%s: %s>" % (self.__class__.__name__, u)) def __str__(self): # TODO this could be simpler? if hasattr(self, "__unicode__"): if six.PY3: return self.__unicode__() else: return six.text_type(self).encode("utf-8") return six.text_type("%s object" % self.__class__.__name__) def __eq__(self, other): if ( isinstance(other, self.__class__) and hasattr(other, "id") and other.id is not None ): return self.id == other.id if isinstance(other, DBRef): return ( self._get_collection_name() == other.collection and self.id == other.id ) if self.id is None: return self is other return False def __ne__(self, other): return not self.__eq__(other) def clean(self): """ Hook for doing document level data cleaning before validation is run. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. """ pass def get_text_score(self): """ Get text score from text query """ if "_text_score" not in self._data: raise InvalidDocumentError( "This document is not originally built from a text query" ) return self._data["_text_score"] def to_mongo(self, use_db_field=True, fields=None): """ Return as SON data ready for use with MongoDB. 
""" fields = fields or [] data = SON() data["_id"] = None data["_cls"] = self._class_name # only root fields ['test1.a', 'test2'] => ['test1', 'test2'] root_fields = {f.split(".")[0] for f in fields} for field_name in self: if root_fields and field_name not in root_fields: continue value = self._data.get(field_name, None) field = self._fields.get(field_name) if field is None and self._dynamic: field = self._dynamic_fields.get(field_name) if value is not None: f_inputs = field.to_mongo.__code__.co_varnames ex_vars = {} if fields and "fields" in f_inputs: key = "%s." % field_name embedded_fields = [ i.replace(key, "") for i in fields if i.startswith(key) ] ex_vars["fields"] = embedded_fields if "use_db_field" in f_inputs: ex_vars["use_db_field"] = use_db_field value = field.to_mongo(value, **ex_vars) # Handle self generating fields if value is None and field._auto_gen: value = field.generate() self._data[field_name] = value if (value is not None) or (field.null): if use_db_field: data[field.db_field] = value else: data[field.name] = value # Only add _cls if allow_inheritance is True if not self._meta.get("allow_inheritance"): data.pop("_cls") return data def validate(self, clean=True): """Ensure that all fields' values are valid and that required fields are present. Raises :class:`ValidationError` if any of the fields' values are found to be invalid. """ # Ensure that each field is matched to a valid value errors = {} if clean: try: self.clean() except ValidationError as error: errors[NON_FIELD_ERRORS] = error # Get a list of tuples of field names and their current values fields = [ ( self._fields.get(name, self._dynamic_fields.get(name)), self._data.get(name), ) for name in self._fields_ordered ] EmbeddedDocumentField = _import_class("EmbeddedDocumentField") GenericEmbeddedDocumentField = _import_class("GenericEmbeddedDocumentField") for field, value in fields: if value is not None: try: if isinstance( field, (EmbeddedDocumentField, GenericEmbeddedDocumentField) ): field._validate(value, clean=clean) else: field._validate(value) except ValidationError as error: errors[field.name] = error.errors or error except (ValueError, AttributeError, AssertionError) as error: errors[field.name] = error elif field.required and not getattr(field, "_auto_gen", False): errors[field.name] = ValidationError( "Field is required", field_name=field.name ) if errors: pk = "None" if hasattr(self, "pk"): pk = self.pk elif self._instance and hasattr(self._instance, "pk"): pk = self._instance.pk message = "ValidationError (%s:%s) " % (self._class_name, pk) raise ValidationError(message, errors=errors) def to_json(self, *args, **kwargs): """Convert this document to JSON. :param use_db_field: Serialize field names as they appear in MongoDB (as opposed to attribute names on this document). Defaults to True. """ use_db_field = kwargs.pop("use_db_field", True) return json_util.dumps(self.to_mongo(use_db_field), *args, **kwargs) @classmethod def from_json(cls, json_data, created=False): """Converts json data to a Document instance :param str json_data: The json data to load into the Document :param bool created: Boolean defining whether to consider the newly instantiated document as brand new or as persisted already: * If True, consider the document as brand new, no matter what data it's loaded with (i.e. even if an ID is loaded). * If False and an ID is NOT provided, consider the document as brand new. 
* If False and an ID is provided, assume that the object has already been persisted (this has an impact on the subsequent call to .save()). * Defaults to ``False``. """ # TODO should `created` default to False? If the object already exists # in the DB, you would likely retrieve it from MongoDB itself through # a query, not load it from JSON data. return cls._from_son(json_util.loads(json_data), created=created) def __expand_dynamic_values(self, name, value): """Expand any dynamic values to their correct types / values.""" if not isinstance(value, (dict, list, tuple)): return value # If the value is a dict with '_cls' in it, turn it into a document is_dict = isinstance(value, dict) if is_dict and "_cls" in value: cls = get_document(value["_cls"]) return cls(**value) if is_dict: value = {k: self.__expand_dynamic_values(k, v) for k, v in value.items()} else: value = [self.__expand_dynamic_values(name, v) for v in value] # Convert lists / values so we can watch for any changes on them EmbeddedDocumentListField = _import_class("EmbeddedDocumentListField") if isinstance(value, (list, tuple)) and not isinstance(value, BaseList): if issubclass(type(self), EmbeddedDocumentListField): value = EmbeddedDocumentList(value, self, name) else: value = BaseList(value, self, name) elif isinstance(value, dict) and not isinstance(value, BaseDict): value = BaseDict(value, self, name) return value def _mark_as_changed(self, key): """Mark a key as explicitly changed by the user.""" if not key: return if not hasattr(self, "_changed_fields"): return if "." in key: key, rest = key.split(".", 1) key = self._db_field_map.get(key, key) key = "%s.%s" % (key, rest) else: key = self._db_field_map.get(key, key) if key not in self._changed_fields: levels, idx = key.split("."), 1 while idx <= len(levels): if ".".join(levels[:idx]) in self._changed_fields: break idx += 1 else: self._changed_fields.append(key) # remove lower level changed fields level = ".".join(levels[:idx]) + "." remove = self._changed_fields.remove for field in self._changed_fields[:]: if field.startswith(level): remove(field) def _clear_changed_fields(self): """Using _get_changed_fields iterate and remove any fields that are marked as changed. """ for changed in self._get_changed_fields(): parts = changed.split(".") data = self for part in parts: if isinstance(data, list): try: data = data[int(part)] except IndexError: data = None elif isinstance(data, dict): data = data.get(part, None) else: data = getattr(data, part, None) if not isinstance(data, LazyReference) and hasattr( data, "_changed_fields" ): if getattr(data, "_is_document", False): continue data._changed_fields = [] self._changed_fields = [] def _nestable_types_changed_fields(self, changed_fields, base_key, data): """Inspect nested data for changed fields :param changed_fields: Previously collected changed fields :param base_key: The base key that must be used to prepend changes to this data :param data: data to inspect for changes """ # Loop list / dict fields as they contain documents # Determine the iterator to use if not hasattr(data, "items"): iterator = enumerate(data) else: iterator = iteritems(data) for index_or_key, value in iterator: item_key = "%s%s." % (base_key, index_or_key) # don't check anything lower if this key is already marked # as changed. 
if item_key[:-1] in changed_fields: continue if hasattr(value, "_get_changed_fields"): changed = value._get_changed_fields() changed_fields += ["%s%s" % (item_key, k) for k in changed if k] elif isinstance(value, (list, tuple, dict)): self._nestable_types_changed_fields(changed_fields, item_key, value) def _get_changed_fields(self): """Return a list of all fields that have explicitly been changed. """ EmbeddedDocument = _import_class("EmbeddedDocument") ReferenceField = _import_class("ReferenceField") GenericReferenceField = _import_class("GenericReferenceField") SortedListField = _import_class("SortedListField") changed_fields = [] changed_fields += getattr(self, "_changed_fields", []) for field_name in self._fields_ordered: db_field_name = self._db_field_map.get(field_name, field_name) key = "%s." % db_field_name data = self._data.get(field_name, None) field = self._fields.get(field_name) if db_field_name in changed_fields: # Whole field already marked as changed, no need to go further continue if isinstance(field, ReferenceField): # Don't follow referenced documents continue if isinstance(data, EmbeddedDocument): # Find all embedded fields that have been changed changed = data._get_changed_fields() changed_fields += ["%s%s" % (key, k) for k in changed if k] elif isinstance(data, (list, tuple, dict)): if hasattr(field, "field") and isinstance( field.field, (ReferenceField, GenericReferenceField) ): continue elif isinstance(field, SortedListField) and field._ordering: # if ordering is affected whole list is changed if any(field._ordering in d._changed_fields for d in data): changed_fields.append(db_field_name) continue self._nestable_types_changed_fields(changed_fields, key, data) return changed_fields def _delta(self): """Returns the delta (set, unset) of the changes for a document. Gets any values that have been explicitly changed. """ # Handles cases where not loaded from_son but has _id doc = self.to_mongo() set_fields = self._get_changed_fields() unset_data = {} if hasattr(self, "_changed_fields"): set_data = {} # Fetch each set item from its path for path in set_fields: parts = path.split(".") d = doc new_path = [] for p in parts: if isinstance(d, (ObjectId, DBRef)): # Don't dig in the references break elif isinstance(d, list) and p.isdigit(): # An item of a list (identified by its index) is updated d = d[int(p)] elif hasattr(d, "get"): # dict-like (dict, embedded document) d = d.get(p) new_path.append(p) path = ".".join(new_path) set_data[path] = d else: set_data = doc if "_id" in set_data: del set_data["_id"] # Determine if any changed items were actually unset. for path, value in set_data.items(): if value or isinstance( value, (numbers.Number, bool) ): # Account for 0 and True that are truthy continue parts = path.split(".") if self._dynamic and len(parts) and parts[0] in self._dynamic_fields: del set_data[path] unset_data[path] = 1 continue # If we've set a value that ain't the default value don't unset it. 
default = None if path in self._fields: default = self._fields[path].default else: # Perform a full lookup for lists / embedded lookups d = self db_field_name = parts.pop() for p in parts: if isinstance(d, list) and p.isdigit(): d = d[int(p)] elif hasattr(d, "__getattribute__") and not isinstance(d, dict): real_path = d._reverse_db_field_map.get(p, p) d = getattr(d, real_path) else: d = d.get(p) if hasattr(d, "_fields"): field_name = d._reverse_db_field_map.get( db_field_name, db_field_name ) if field_name in d._fields: default = d._fields.get(field_name).default else: default = None if default is not None: default = default() if callable(default) else default if value != default: continue del set_data[path] unset_data[path] = 1 return set_data, unset_data @classmethod def _get_collection_name(cls): """Return the collection name for this class. None for abstract class. """ return cls._meta.get("collection", None) @classmethod def _from_son(cls, son, _auto_dereference=True, only_fields=None, created=False): """Create an instance of a Document (subclass) from a PyMongo SON.""" if not only_fields: only_fields = [] if son and not isinstance(son, dict): raise ValueError( "The source SON object needs to be of type 'dict' but a '%s' was found" % type(son) ) # Get the class name from the document, falling back to the given # class if unavailable class_name = son.get("_cls", cls._class_name) # Convert SON to a data dict, making sure each key is a string and # corresponds to the right db field. data = {} for key, value in iteritems(son): key = str(key) key = cls._db_field_map.get(key, key) data[key] = value # Return correct subclass for document type if class_name != cls._class_name: cls = get_document(class_name) errors_dict = {} fields = cls._fields if not _auto_dereference: fields = copy.deepcopy(fields) for field_name, field in iteritems(fields): field._auto_dereference = _auto_dereference if field.db_field in data: value = data[field.db_field] try: data[field_name] = ( value if value is None else field.to_python(value) ) if field_name != field.db_field: del data[field.db_field] except (AttributeError, ValueError) as e: errors_dict[field_name] = e if errors_dict: errors = "\n".join( ["Field '%s' - %s" % (k, v) for k, v in errors_dict.items()] ) msg = "Invalid data to create a `%s` instance.\n%s" % ( cls._class_name, errors, ) raise InvalidDocumentError(msg) # In STRICT documents, remove any keys that aren't in cls._fields if cls.STRICT: data = {k: v for k, v in iteritems(data) if k in cls._fields} obj = cls( __auto_convert=False, _created=created, __only_fields=only_fields, **data ) obj._changed_fields = [] if not _auto_dereference: obj._fields = fields return obj @classmethod def _build_index_specs(cls, meta_indexes): """Generate and merge the full index specs.""" geo_indices = cls._geo_indices() unique_indices = cls._unique_with_indexes() index_specs = [cls._build_index_spec(spec) for spec in meta_indexes] def merge_index_specs(index_specs, indices): """Helper method for merging index specs.""" if not indices: return index_specs # Create a map of index fields to index spec. We're converting # the fields from a list to a tuple so that it's hashable. spec_fields = {tuple(index["fields"]): index for index in index_specs} # For each new index, if there's an existing index with the same # fields list, update the existing spec with all data from the # new spec. 
for new_index in indices: candidate = spec_fields.get(tuple(new_index["fields"])) if candidate is None: index_specs.append(new_index) else: candidate.update(new_index) return index_specs # Merge geo indexes and unique_with indexes into the meta index specs. index_specs = merge_index_specs(index_specs, geo_indices) index_specs = merge_index_specs(index_specs, unique_indices) return index_specs @classmethod def _build_index_spec(cls, spec): """Build a PyMongo index spec from a MongoEngine index spec.""" if isinstance(spec, six.string_types): spec = {"fields": [spec]} elif isinstance(spec, (list, tuple)): spec = {"fields": list(spec)} elif isinstance(spec, dict): spec = dict(spec) index_list = [] direction = None # Check to see if we need to include _cls allow_inheritance = cls._meta.get("allow_inheritance") include_cls = ( allow_inheritance and not spec.get("sparse", False) and spec.get("cls", True) and "_cls" not in spec["fields"] ) # 733: don't include cls if index_cls is False unless there is an explicit cls with the index include_cls = include_cls and ( spec.get("cls", False) or cls._meta.get("index_cls", True) ) if "cls" in spec: spec.pop("cls") for key in spec["fields"]: # If inherited spec continue if isinstance(key, (list, tuple)): continue # ASCENDING from + # DESCENDING from - # TEXT from $ # HASHED from # # GEOSPHERE from ( # GEOHAYSTACK from ) # GEO2D from * direction = pymongo.ASCENDING if key.startswith("-"): direction = pymongo.DESCENDING elif key.startswith("$"): direction = pymongo.TEXT elif key.startswith("#"): direction = pymongo.HASHED elif key.startswith("("): direction = pymongo.GEOSPHERE elif key.startswith(")"): direction = pymongo.GEOHAYSTACK elif key.startswith("*"): direction = pymongo.GEO2D if key.startswith(("+", "-", "*", "$", "#", "(", ")")): key = key[1:] # Use real field name, do it manually because we need field # objects for the next part (list field checking) parts = key.split(".") if parts in (["pk"], ["id"], ["_id"]): key = "_id" else: fields = cls._lookup_field(parts) parts = [] for field in fields: try: if field != "_id": field = field.db_field except AttributeError: pass parts.append(field) key = ".".join(parts) index_list.append((key, direction)) # Don't add cls to a geo index if include_cls and direction not in ( pymongo.GEO2D, pymongo.GEOHAYSTACK, pymongo.GEOSPHERE, ): index_list.insert(0, ("_cls", 1)) if index_list: spec["fields"] = index_list return spec @classmethod def _unique_with_indexes(cls, namespace=""): """Find unique indexes in the document schema and return them.""" unique_indexes = [] for field_name, field in cls._fields.items(): sparse = field.sparse # Generate a list of indexes needed by uniqueness constraints if field.unique: unique_fields = [field.db_field] # Add any unique_with fields to the back of the index spec if field.unique_with: if isinstance(field.unique_with, six.string_types): field.unique_with = [field.unique_with] # Convert unique_with field names to real field names unique_with = [] for other_name in field.unique_with: parts = other_name.split(".") # Lookup real name parts = cls._lookup_field(parts) name_parts = [part.db_field for part in parts] unique_with.append(".".join(name_parts)) # Unique field should be required parts[-1].required = True sparse = not sparse and parts[-1].name not in cls.__dict__ unique_fields += unique_with # Add the new index to the list fields = [ ("%s%s" % (namespace, f), pymongo.ASCENDING) for f in unique_fields ] index = {"fields": fields, "unique": True, "sparse": sparse} 
unique_indexes.append(index) if field.__class__.__name__ in { "EmbeddedDocumentListField", "ListField", "SortedListField", }: field = field.field # Grab any embedded document field unique indexes if ( field.__class__.__name__ == "EmbeddedDocumentField" and field.document_type != cls ): field_namespace = "%s." % field_name doc_cls = field.document_type unique_indexes += doc_cls._unique_with_indexes(field_namespace) return unique_indexes @classmethod def _geo_indices(cls, inspected=None, parent_field=None): inspected = inspected or [] geo_indices = [] inspected.append(cls) geo_field_type_names = ( "EmbeddedDocumentField", "GeoPointField", "PointField", "LineStringField", "PolygonField", ) geo_field_types = tuple( [_import_class(field) for field in geo_field_type_names] ) for field in cls._fields.values(): if not isinstance(field, geo_field_types): continue if hasattr(field, "document_type"): field_cls = field.document_type if field_cls in inspected: continue if hasattr(field_cls, "_geo_indices"): geo_indices += field_cls._geo_indices( inspected, parent_field=field.db_field ) elif field._geo_index: field_name = field.db_field if parent_field: field_name = "%s.%s" % (parent_field, field_name) geo_indices.append({"fields": [(field_name, field._geo_index)]}) return geo_indices @classmethod def _lookup_field(cls, parts): """Given the path to a given field, return a list containing the Field object associated with that field and all of its parent Field objects. Args: parts (str, list, or tuple) - path to the field. Should be a string for simple fields existing on this document or a list of strings for a field that exists deeper in embedded documents. Returns: A list of Field instances for fields that were found or strings for sub-fields that weren't. Example: >>> user._lookup_field('name') [<mongoengine.fields.StringField at 0x1119bff50>] >>> user._lookup_field('roles') [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>] >>> user._lookup_field(['roles', 'role']) [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>, <mongoengine.fields.StringField at 0x1119ec050>] >>> user._lookup_field('doesnt_exist') raises LookUpError >>> user._lookup_field(['roles', 'doesnt_exist']) [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>, 'doesnt_exist'] """ # TODO this method is WAY too complicated. Simplify it. 
# TODO don't think returning a string for embedded non-existent fields is desired ListField = _import_class("ListField") DynamicField = _import_class("DynamicField") if not isinstance(parts, (list, tuple)): parts = [parts] fields = [] field = None for field_name in parts: # Handle ListField indexing: if field_name.isdigit() and isinstance(field, ListField): fields.append(field_name) continue # Look up first field from the document if field is None: if field_name == "pk": # Deal with "primary key" alias field_name = cls._meta["id_field"] if field_name in cls._fields: field = cls._fields[field_name] elif cls._dynamic: field = DynamicField(db_field=field_name) elif cls._meta.get("allow_inheritance") or cls._meta.get( "abstract", False ): # 744: in case the field is defined in a subclass for subcls in cls.__subclasses__(): try: field = subcls._lookup_field([field_name])[0] except LookUpError: continue if field is not None: break else: raise LookUpError('Cannot resolve field "%s"' % field_name) else: raise LookUpError('Cannot resolve field "%s"' % field_name) else: ReferenceField = _import_class("ReferenceField") GenericReferenceField = _import_class("GenericReferenceField") # If previous field was a reference, throw an error (we # cannot look up fields that are on references). if isinstance(field, (ReferenceField, GenericReferenceField)): raise LookUpError( "Cannot perform join in mongoDB: %s" % "__".join(parts) ) # If the parent field has a "field" attribute which has a # lookup_member method, call it to find the field # corresponding to this iteration. if hasattr(getattr(field, "field", None), "lookup_member"): new_field = field.field.lookup_member(field_name) # If the parent field is a DynamicField or if it's part of # a DynamicDocument, mark current field as a DynamicField # with db_name equal to the field name. elif cls._dynamic and ( isinstance(field, DynamicField) or getattr(getattr(field, "document_type", None), "_dynamic", None) ): new_field = DynamicField(db_field=field_name) # Else, try to use the parent field's lookup_member method # to find the subfield. elif hasattr(field, "lookup_member"): new_field = field.lookup_member(field_name) # Raise a LookUpError if all the other conditions failed. else: raise LookUpError( "Cannot resolve subfield or operator {} " "on the field {}".format(field_name, field.name) ) # If current field still wasn't found and the parent field # is a ComplexBaseField, add the name current field name and # move on. if not new_field and isinstance(field, ComplexBaseField): fields.append(field_name) continue elif not new_field: raise LookUpError('Cannot resolve field "%s"' % field_name) field = new_field # update field to the new field type fields.append(field) return fields @classmethod def _translate_field_name(cls, field, sep="."): """Translate a field attribute name to a database field name. """ parts = field.split(sep) parts = [f.db_field for f in cls._lookup_field(parts)] return ".".join(parts) def __set_field_display(self): """For each field that specifies choices, create a get_<field>_display method. 
""" fields_with_choices = [(n, f) for n, f in self._fields.items() if f.choices] for attr_name, field in fields_with_choices: setattr( self, "get_%s_display" % attr_name, partial(self.__get_field_display, field=field), ) def __get_field_display(self, field): """Return the display value for a choice field""" value = getattr(self, field.name) if field.choices and isinstance(field.choices[0], (list, tuple)): if value is None: return None sep = getattr(field, "display_sep", " ") values = ( value if field.__class__.__name__ in ("ListField", "SortedListField") else [value] ) return sep.join( [ six.text_type(dict(field.choices).get(val, val)) for val in values or [] ] ) return value
def build_itemsets(distance, undefined=True, only_tourists=True): hash_table = dict() count = 0 places = create_place_dict() f = open('data/encoded_itemsets.txt', mode='w') g = open('data/translation_table.txt', mode='w') o = open('data/decoded_itemsets.txt', mode='w') q1 = {'touristLocal': 'tourist'} q2 = {'$or': [{'touristLocal': 'tourist'}, {'touristLocal': 'undefined'}]} if only_tourists: query = q1 else: query = q2 for tourist in ucol.find(query): tweets = [] item_set = set() places_set = set() for sequence in scol.find({ 'user_id': tourist['id'] }).sort('serial', pymongo.ASCENDING): for id in sequence['sequence']: for tweet in tcol.find({'id': id}): tweets.append(tweet) lon = tweet['coordinates']['coordinates'][0] lat = tweet['coordinates']['coordinates'][1] queryNear = { 'coordinates': SON([('$near', { "type": "Point", "coordinates": [lon, lat] }), ('$maxDistance', distance)]) } close_pois = pcol.find(queryNear) closest_poi = 'UNDEFINED' if close_pois.count() > 0: place_id = close_pois.next()['id'] code_place = places.get(place_id, -1) if code_place == -1: continue closest_poi = code_place else: if not undefined: continue code = hash_table.get(closest_poi, 0) if code == 0: count += 1 hash_table[closest_poi] = count code = count item_set.add(code) places_set.add(code_place) if len(item_set) > 0: f.write(" ".join(list(map(str, sorted(item_set))))) f.write("\n") if len(places_set) > 0: o.write(" ".join(list(map(str, places_set)))) o.write("\n") aux = [] for key, value in hash_table.items(): aux += [(key, value)] sorted_aux = sorted(aux, key=lambda tup: tup[1]) for item in sorted_aux: g.write(str(item[1]) + "\t" + item[0] + "\n") return hash_table
def to_mongo(self, value):
    if isinstance(value, dict):
        return value
    return SON([("type", self._type), ("coordinates", value)])
def __init__(self, *args, **values): """ Initialise a document or an embedded document. :param dict values: A dictionary of keys and values for the document. It may contain additional reserved keywords, e.g. "__auto_convert". :param bool __auto_convert: If True, supplied values will be converted to Python-type values via each field's `to_python` method. :param set __only_fields: A set of fields that have been loaded for this document. Empty if all fields have been loaded. :param bool _created: Indicates whether this is a brand new document or whether it's already been persisted before. Defaults to true. """ self._initialised = False self._created = True if args: # Combine positional arguments with named arguments. # We only want named arguments. field = iter(self._fields_ordered) # If its an automatic id field then skip to the first defined field if getattr(self, '_auto_id_field', False): next(field) for value in args: name = next(field) if name in values: raise TypeError( 'Multiple values for keyword argument "%s"' % name) values[name] = value __auto_convert = values.pop('__auto_convert', True) __only_fields = set(values.pop('__only_fields', values)) _created = values.pop('_created', True) signals.pre_init.send(self.__class__, document=self, values=values) # Check if there are undefined fields supplied to the constructor, # if so raise an Exception. if not self._dynamic and (self._meta.get('strict', True) or _created): _undefined_fields = set( values.keys()) - set(self._fields.keys() + ['id', 'pk', '_cls', '_text_score']) if _undefined_fields: msg = ('The fields "{0}" do not exist on the document "{1}"' ).format(_undefined_fields, self._class_name) raise FieldDoesNotExist(msg) if self.STRICT and not self._dynamic: self._data = StrictDict.create(allowed_keys=self._fields_ordered)() else: self._data = {} self._dynamic_fields = SON() # Assign default values to the instance. # We set default values only for fields loaded from DB. See # https://github.com/mongoengine/mongoengine/issues/399 for more info. for key, field in iteritems(self._fields): if self._db_field_map.get(key, key) in __only_fields: continue value = getattr(self, key, None) setattr(self, key, value) if '_cls' not in values: self._cls = self._class_name # Set passed values after initialisation if self._dynamic: dynamic_data = {} for key, value in iteritems(values): if key in self._fields or key == '_id': setattr(self, key, value) else: dynamic_data[key] = value else: FileField = _import_class('FileField') for key, value in iteritems(values): key = self._reverse_db_field_map.get(key, key) if key in self._fields or key in ('id', 'pk', '_cls'): if __auto_convert and value is not None: field = self._fields.get(key) if field and not isinstance(field, FileField): value = field.to_python(value) setattr(self, key, value) else: self._data[key] = value # Set any get_<field>_display methods self.__set_field_display() if self._dynamic: self._dynamic_lock = False for key, value in iteritems(dynamic_data): setattr(self, key, value) # Flag initialised self._initialised = True self._created = _created signals.post_init.send(self.__class__, document=self)
def __init__(self, *args, **values): """ Initialise a document or embedded document :param __auto_convert: Try and will cast python objects to Object types :param values: A dictionary of values for the document """ self._initialised = False self._created = True if args: # Combine positional arguments with named arguments. # We only want named arguments. field = iter(self._fields_ordered) # If its an automatic id field then skip to the first defined field if getattr(self, '_auto_id_field', False): next(field) for value in args: name = next(field) if name in values: raise TypeError( 'Multiple values for keyword argument "%s"' % name) values[name] = value __auto_convert = values.pop('__auto_convert', True) # 399: set default values only to fields loaded from DB __only_fields = set(values.pop('__only_fields', values)) _created = values.pop('_created', True) signals.pre_init.send(self.__class__, document=self, values=values) # Check if there are undefined fields supplied to the constructor, # if so raise an Exception. if not self._dynamic and (self._meta.get('strict', True) or _created): _undefined_fields = set( values.keys()) - set(self._fields.keys() + ['id', 'pk', '_cls', '_text_score']) if _undefined_fields: msg = ('The fields "{0}" do not exist on the document "{1}"' ).format(_undefined_fields, self._class_name) raise FieldDoesNotExist(msg) if self.STRICT and not self._dynamic: self._data = StrictDict.create(allowed_keys=self._fields_ordered)() else: self._data = {} self._dynamic_fields = SON() # Assign default values to instance for key, field in self._fields.iteritems(): if self._db_field_map.get(key, key) in __only_fields: continue value = getattr(self, key, None) setattr(self, key, value) if '_cls' not in values: self._cls = self._class_name # Set passed values after initialisation if self._dynamic: dynamic_data = {} for key, value in values.iteritems(): if key in self._fields or key == '_id': setattr(self, key, value) else: dynamic_data[key] = value else: FileField = _import_class('FileField') for key, value in values.iteritems(): key = self._reverse_db_field_map.get(key, key) if key in self._fields or key in ('id', 'pk', '_cls'): if __auto_convert and value is not None: field = self._fields.get(key) if field and not isinstance(field, FileField): value = field.to_python(value) setattr(self, key, value) else: self._data[key] = value # Set any get_<field>_display methods self.__set_field_display() if self._dynamic: self._dynamic_lock = False for key, value in dynamic_data.iteritems(): setattr(self, key, value) # Flag initialised self._initialised = True self._created = _created signals.post_init.send(self.__class__, document=self)
def import_from_handle(cls, handle, filename, identifiers, languages, mimetype, source): '''Import a file using a file-like object. The file is uploaded to the s3 bucket specified in `dlx.Config`. The metadata is stored in the database. All paramaters are required. Parameters ---------- handle : Any file-like object filename : str The destination filename. Files with common identifiers, languages, and filename are considered versions of each other identifiers : list(dlx.file.Identifier) languages : list(str) The ISO 639-1 codes of the languages of the content. Codes will be stored in uppercase. mimetype : str Must be a value recognized by s3, otherwise the upload will fail source : str Name of the process that called the import for auditing Returns ------- If succesful, the md5 checksum as a hex string (also used as the database record ID) of the imported file, otherwise `False` Raises ------ FileExists : The file is already in the system FileExistsIdentifierConflict : The file is already in the system but with different identifiers FileExistsLanguageConflict : The file is already in the system different languages ''' ### if len(identifiers) == 0 or len(languages) == 0: raise ValueError('Params `identifiers` and `languages` cannot be an empty list') for idx in identifiers: if not isinstance(idx, Identifier): raise TypeError('Identifier must be of type `dlx.file.Identifier`') for lang in languages: lang = lang.upper() if lang.lower() not in ISO6391.codes: raise ValueError('Invalid ISO 639-1 language code') ### hasher = hashlib.md5() while True: chunk = handle.read(8192) if chunk: hasher.update(chunk) else: break size = handle.tell() if size == 0: raise Exception('File-like object "{}" has no content'.format(handle)) checksum = hasher.hexdigest() File._check_file_exists(checksum, identifiers, languages) handle.seek(0) ### if S3.upload(handle, checksum, mimetype): db_result = DB.files.insert_one(SON({ '_id': checksum, 'filename': filename, 'identifiers': [SON({'type': idx.type, 'value': idx.value}) for idx in identifiers], 'languages': languages, 'mimetype': mimetype, 'size': size, 'source': source, 'timestamp': datetime.now(timezone.utc), 'uri': '{}.s3.amazonaws.com/{}'.format(S3.bucket, checksum), })) if db_result.acknowledged: return checksum return False
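# Hedged usage sketch for File.import_from_handle() above. The file name, the
# document symbol, and the assumption that dlx.file.Identifier is constructed
# as Identifier(type, value) are all illustrative, not taken from the source.
with open('A_RES_75_1-EN.pdf', 'rb') as handle:
    checksum = File.import_from_handle(
        handle,
        filename='A_RES_75_1-EN.pdf',
        identifiers=[Identifier('symbol', 'A/RES/75/1')],
        languages=['EN'],
        mimetype='application/pdf',
        source='demo-import',
    )
    # returns the md5 hex digest on success, False if the S3 upload failed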
def to_mongo(self, value): if isinstance(value, dict): return value return SON([('type', self._type), ('coordinates', value)])
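# Quick illustration (not from the original source) of what the geo to_mongo above
# returns, assuming a field whose _type is 'Point': raw coordinates are wrapped in an
# ordered GeoJSON-style SON, while a ready-made dict is passed through unchanged.
from bson import SON

coords = [40.0, 5.0]
print(SON([('type', 'Point'), ('coordinates', coords)]))
# SON([('type', 'Point'), ('coordinates', [40.0, 5.0])])
print({'type': 'Point', 'coordinates': coords})  # a dict would be returned as-is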
def greedy_optimization(outfile,task,image_certificate_file,initial_model,convolve_func,rep_limit, modifier_args,modifier): conn = pm.Connection(document_class=bson.SON) db = conn['v1'] opt_fs = gridfs.GridFS(db,'optimized_performance') image_coll = db['raw_images.files'] image_fs = gridfs.GridFS(db,'raw_images') image_certdict = cPickle.load(open(image_certificate_file)) print('using image certificate', image_certificate_file) image_hash = image_certdict['run_hash'] image_args = image_certdict['out_args'] if convolve_func == v1f.v1like_filter_pyfft: v1_pyfft.setup_pyfft() filterbanks = [] perfs = [] model_configs = [] center_config = initial_model i = 0 improving = True while ((i < rep_limit) or rep_limit is None): i += 1 print('Round', i) next_configs = [m for m in get_consistent_deltas(center_config,modifier) if m not in model_configs] if next_configs: next_results = [get_performance(task,image_hash,image_fs,m,convolve_func) for m in next_configs] next_perfs = [x[0] for x in next_results] next_filterbanks = [x[1] for x in next_results] next_perf_ac_max = np.array([x['test_accuracy'] for x in next_perfs]).max() perf_ac_max = max([x['test_accuracy'] for x in perfs]) if perfs else 0 if next_perf_ac_max > perf_ac_max: next_perf_ac_argmax = np.array([x['test_accuracy'] for x in next_perfs]).argmax() center_config = next_configs[next_perf_ac_argmax] print('\n\n') print('new best performance is', next_perf_ac_max, 'from model', center_config) print('\n\n') perfs.extend(next_perfs) model_configs.extend(next_configs) filterbanks.extend(next_filterbanks) else: print('Breaking because no further optimization could be done. Best existing performance was', perf_ac_max, 'while best next performance was', next_perf_ac_max) break else: print('Breaking because no next configs') break perfargmax = np.array([p['test_accuracy'] for p in perfs]).argmax() best_model = model_configs[perfargmax] best_performance = perfs[perfargmax] out_record = SON([('initial_model',initial_model), ('task',son_escape(task)), ('images',son_escape(image_args)), ('images_hash',image_hash), ('modifier_args',son_escape(modifier_args)), ('modifier',modifier.__class__.__module__ + '.' + modifier.__class__.__name__) ]) filename = get_filename(out_record) out_record['filename'] = filename out_record.update(SON([('performances',perfs)])) out_record.update(SON([('best_model',best_model)])) out_record.update(SON([('best_performance',best_performance)])) out_record.update(SON([('num_steps',len(model_configs))])) out_record.update(SON([('models',model_configs)])) outdata = cPickle.dumps(filterbanks) opt_fs.put(outdata,**out_record) if convolve_func == v1f.v1like_filter_pyfft: v1_pyfft.cleanup_pyfft() createCertificateDict(outfile,{'image_file':image_certificate_file})
def distinct_cmd(self, key, query=None): cmd = SON([('distinct', self._mongo_collection.name)]) cmd.update({'key': key}) if query: cmd.update({'query': query}) return self.db_command(cmd)['values']
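# Equivalent raw-command sketch for distinct_cmd above (with the query fix), written
# directly against PyMongo; the database, collection, and field names are placeholders.
from bson import SON
from pymongo import MongoClient

db = MongoClient().test
cmd = SON([('distinct', 'users')])
cmd.update({'key': 'country'})
cmd.update({'query': {'active': True}})
print(db.command(cmd)['values'])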
def set_fail_point(self, command_args): cmd = SON([("configureFailPoint", "failCommand")]) cmd.update(command_args) self.client.admin.command(cmd)
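# Sketch of arming the failCommand fail point the way set_fail_point above does.
# This only works against a mongod/mongos started with
# --setParameter enableTestCommands=1; the failing command and error code are examples.
from bson import SON
from pymongo import MongoClient

client = MongoClient()
cmd = SON([("configureFailPoint", "failCommand")])
cmd.update({"mode": {"times": 1},
            "data": {"failCommands": ["insert"], "errorCode": 91}})
client.admin.command(cmd)

# Disable the fail point again when the test is done.
client.admin.command(SON([("configureFailPoint", "failCommand"), ("mode", "off")]))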
async def use_count_command(self): response = await self.db.command(SON([("count", "img_url")])) print(f'response:{pprint.pformat(response)}')
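# Minimal standalone async sketch of the count command above, assuming Motor as the
# async driver and an existing "img_url" collection; connection and names are illustrative.
import asyncio
import pprint
from bson import SON
from motor.motor_asyncio import AsyncIOMotorClient

async def main():
    db = AsyncIOMotorClient()['test']
    response = await db.command(SON([("count", "img_url")]))
    print(f'response:{pprint.pformat(response)}')

asyncio.run(main())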
def userRegistration(collection): dataset = [] id = '' cursor = collection.aggregate([{ '$unwind': '$products' }, { '$match': { 'products.project.registration_date': { '$gte': dateutil.parser.parse('2017-09-01T00:00:00.000Z') } } }, { '$project': { 'email': 1, 'products': 1, 'pay_total': 1, 'contacts': 1, '__v': 1 } }, { '$sort': SON([('products.project.registration_date', 1)]) }]) for user in cursor: products = user['products'] project = products['project'] company = project.get('company', '') last_utm_source = '' last_utm_medium = '' last_utm_campaign = '' sector = '' list_sources = '' productName = 'product_one' if (products['_id'] == id) else 'product_two' try: utm = map(mapUtm, project["utm_sources"]) list_sources = ','.join(utm) except AttributeError: print('Utm is empty') if ('last_mark' in project): if ('utm' in project['last_mark']): lastUtm = utmParse(project['last_mark']['utm'], True) last_utm_source = lastUtm['utm_source'] if ( type(lastUtm) == dict and 'utm_source' in lastUtm) else '' last_utm_medium = lastUtm['utm_medium'] if ( type(lastUtm) == dict and 'utm_medium' in lastUtm) else '' last_utm_campaign = lastUtm['utm_campaign'] if ( type(lastUtm) == dict and 'utm_campaign' in lastUtm) else '' if ('sector' in project): if ('name' in project['sector']): sector = project['sector']['name'] dataset.append({ 'registration_date': project['registration_date'], 'product': productName, 'pay_total': project['pay_total'], 'name': project['name'], 'phone': project['phone'], 'company': company, 'email': user['email'], 'sector': sector, 'last_click_source': last_utm_source, 'last_click_channel': last_utm_medium, 'last_click_campaign': last_utm_campaign, 'multi_channel': list_sources }) return dataset
def query(_doc_cls=None, **kwargs): """Transform a query from Django-style format to Mongo format.""" mongo_query = {} merge_query = defaultdict(list) for key, value in sorted(kwargs.items()): if key == '__raw__': mongo_query.update(value) continue parts = key.rsplit('__') indices = [(i, p) for i, p in enumerate(parts) if p.isdigit()] parts = [part for part in parts if not part.isdigit()] # Check for an operator and transform to mongo-style if there is op = None if len(parts) > 1 and parts[-1] in MATCH_OPERATORS: op = parts.pop() # Allow to escape operator-like field name by __ if len(parts) > 1 and parts[-1] == '': parts.pop() negate = False if len(parts) > 1 and parts[-1] == 'not': parts.pop() negate = True if _doc_cls: # Switch field names to proper names [set in Field(name='foo')] try: fields = _doc_cls._lookup_field(parts) except Exception as e: raise InvalidQueryError(e) parts = [] CachedReferenceField = _import_class('CachedReferenceField') GenericReferenceField = _import_class('GenericReferenceField') cleaned_fields = [] for field in fields: append_field = True if isinstance(field, six.string_types): parts.append(field) append_field = False # is last and CachedReferenceField elif isinstance(field, CachedReferenceField) and fields[-1] == field: parts.append('%s._id' % field.db_field) else: parts.append(field.db_field) if append_field: cleaned_fields.append(field) # Convert value to proper value field = cleaned_fields[-1] singular_ops = [None, 'ne', 'gt', 'gte', 'lt', 'lte', 'not'] singular_ops += STRING_OPERATORS if op in singular_ops: if isinstance(field, six.string_types): if (op in STRING_OPERATORS and isinstance(value, six.string_types)): StringField = _import_class('StringField') value = StringField.prepare_query_value(op, value) else: value = field else: value = field.prepare_query_value(op, value) if isinstance(field, CachedReferenceField) and value: value = value['_id'] elif op in ('in', 'nin', 'all', 'near') and not isinstance(value, dict): # Raise an error if the in/nin/all/near param is not iterable. value = _prepare_query_for_iterable(field, op, value) # If we're querying a GenericReferenceField, we need to alter the # key depending on the value: # * If the value is a DBRef, the key should be "field_name._ref". # * If the value is an ObjectId, the key should be "field_name._ref.$id". 
if isinstance(field, GenericReferenceField): if isinstance(value, DBRef): parts[-1] += '._ref' elif isinstance(value, ObjectId): parts[-1] += '._ref.$id' # if op and op not in COMPARISON_OPERATORS: if op: if op in GEO_OPERATORS: value = _geo_operator(field, op, value) elif op in ('match', 'elemMatch'): ListField = _import_class('ListField') EmbeddedDocumentField = _import_class('EmbeddedDocumentField') if (isinstance(value, dict) and isinstance(field, ListField) and isinstance(field.field, EmbeddedDocumentField)): value = query(field.field.document_type, **value) else: value = field.prepare_query_value(op, value) value = {'$elemMatch': value} elif op in CUSTOM_OPERATORS: NotImplementedError('Custom method "%s" has not ' 'been implemented' % op) elif op not in STRING_OPERATORS: value = {'$' + op: value} if negate: value = {'$not': value} for i, part in indices: parts.insert(i, part) key = '.'.join(parts) if op is None or key not in mongo_query: mongo_query[key] = value elif key in mongo_query: if isinstance(mongo_query[key], dict): mongo_query[key].update(value) # $max/minDistance needs to come last - convert to SON value_dict = mongo_query[key] if ('$maxDistance' in value_dict or '$minDistance' in value_dict) and \ ('$near' in value_dict or '$nearSphere' in value_dict): value_son = SON() for k, v in value_dict.iteritems(): if k == '$maxDistance' or k == '$minDistance': continue value_son[k] = v # Required for MongoDB >= 2.6, may fail when combining # PyMongo 3+ and MongoDB < 2.6 near_embedded = False for near_op in ('$near', '$nearSphere'): if isinstance(value_dict.get(near_op), dict) and ( IS_PYMONGO_3 or get_connection().max_wire_version > 1): value_son[near_op] = SON(value_son[near_op]) if '$maxDistance' in value_dict: value_son[near_op][ '$maxDistance'] = value_dict[ '$maxDistance'] if '$minDistance' in value_dict: value_son[near_op][ '$minDistance'] = value_dict[ '$minDistance'] near_embedded = True if not near_embedded: if '$maxDistance' in value_dict: value_son['$maxDistance'] = value_dict[ '$maxDistance'] if '$minDistance' in value_dict: value_son['$minDistance'] = value_dict[ '$minDistance'] mongo_query[key] = value_son else: # Store for manually merging later merge_query[key].append(value) # The queryset has been filter in such a way we must manually merge for k, v in merge_query.items(): merge_query[k].append(mongo_query[k]) del mongo_query[k] if isinstance(v, list): value = [{k: val} for val in v] if '$and' in mongo_query.keys(): mongo_query['$and'].extend(value) else: mongo_query['$and'] = value return mongo_query
def from_json(cls, json_str, created=False, *args, **kwargs): # Proposition: add a private method like from_json that allows # dictionaries to be used as inputs to avoid having to use # loads(dumps(data)) all the time """ Decode from human-readable json. Parameters: json_str: JSON string that should be passed to the serialized created: a parameter that is passed to cls._from_son. *args, **kwargs: Any additional arguments that is passed to json.loads. """ from .fields import FollowReferenceField kwargs.setdefault("object_hook", generate_object_hook(cls)) dct = json.loads(json_str, *args, **kwargs) for name, fld in cls._fields.items(): if any([ getattr(fld, "exclude_from_json", None), getattr(fld, "exclude_json", None) ]): dct.pop(name, None) from_son_result = cls._from_son(SON(dct), created=created) atLeastOneReference = False for fldname, fld in cls._fields.items(): if isinstance(fld, db.ListField): target = fld.field if not isinstance(target, db.ReferenceField) or \ isinstance(target, FollowReferenceField): continue atLeastOneReference = True values = dct.get(fldname) setattr(from_son_result, fldname, []) for value in values: valueDoc = value.as_doc() if 'id' not in valueDoc['$id']: valueDoc['$id']['id'] = str(ObjectId()) getattr(from_son_result, fldname).append( target.document_type_obj.from_json( json.dumps(value.as_doc()['$id']))) elif isinstance(fld, db.DictField): target = fld.field if not isinstance(target, db.ReferenceField) or \ isinstance(target, FollowReferenceField): continue atLeastOneReference = True values = dct.get(fldname) setattr(from_son_result, fldname, {}) for k, value in values.items(): valueDoc = value.as_doc() if 'id' not in valueDoc['$id']: valueDoc['$id']['id'] = str(ObjectId()) getattr(from_son_result, fldname)[k] = \ target.document_type_obj.from_json( json.dumps(valueDoc['$id'])) else: target = fld if not isinstance(target, db.ReferenceField) or \ isinstance(target, FollowReferenceField): continue atLeastOneReference = True value = dct.get(fldname) try: valueDoc = value.as_doc() # If there is no ID in the JSON (aka the JSON was not saved # from mongoengine but rather created manually), create an # ObjectId on the fly if 'id' not in valueDoc['$id']: valueDoc['$id']['id'] = ObjectId() valueDoc['$id']['id'] = str(valueDoc['$id']['id']) setattr( from_son_result, fldname, target.document_type_obj.from_json( json.dumps(valueDoc['$id']))) except TypeError: setattr( from_son_result, fldname, normalize_reference(getattr(value, "id", value), target)) # All fields have been changed, because the document was loaded from a # JSON file. However, mongoengine does not detect it automatically. In # order for all fields to be saved, we set the _changed_fields variable # manually from_son_result._changed_fields = list(cls._fields.keys()) if atLeastOneReference: # If the document contains at least one reference, override the # save() method to save the referenced documents at the same time # as the master document. Otherwise, the referenced documents would # not be saved and the document would not be valid anymore after a # save and load from the database. 
def save(self, *args, **kwargs): for fldname, fld in cls._fields.items(): if isinstance(fld, (db.ReferenceField, FollowReferenceField)): getattr(self, fldname).save(*args, **kwargs) elif isinstance(fld, db.fields.ComplexBaseField): isReferences = isinstance( fld.field, (db.ReferenceField, FollowReferenceField)) if isinstance(fld, db.DictField) and isReferences: field = getattr(self, fldname) for key, value in field.items(): field[key].save(*args, **kwargs) elif isinstance(fld, db.ListField) and isReferences: field = getattr(self, fldname) for valueIndex in range(len(field)): field[valueIndex].save(*args, **kwargs) super(self.__class__, self).save(*args, **kwargs) from_son_result.save = types.MethodType(save, from_son_result) return from_son_result
def select(): # query the first document in the collection print(site.find_one()) # query all documents in the collection for s in site.find(): print(s) # where output(site.find_one({'_id': ObjectId('5cb73399efdf11190e096721')})) # embedded doc cursor = inventory.find( {"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}) inventory.find({"size.uom": "in"}) # and output(site.find_one({ 'name': 'RUNOOB', 'alexa': '100003', })) query = {"name": {"$lt": "H"}} mydoc = site.find(query) for x in mydoc: print(x) # field projection / or; select xx .limit(1).skip(2) => limit(2,1) query = {"$or": [{'alexa': '101'}, {'alexa': '12345'}]} output(site.find(query, {'_id': 0}).limit(1).skip(2)) # order by: 1 sorts ascending, -1 sorts descending for x in site.find().sort([('alexa', pymongo.DESCENDING), ('name', pymongo.ASCENDING)]): print(x) # in query = {'name': {'$in': ['Facebook', 'Taobao']}} for x in site.find(query): print(x) # array # strict match: same elements in the same order inventory.find({"tags": ["red", "blank"]}) # contains both elements, regardless of order or other elements inventory.find({"tags": {"$all": ["red", "blank"]}}) # contains the element inventory.find({"tags": "red"}) # array index 1 (second position) cursor = db.inventory.find({"dim_cm.1": {"$gt": 25}}) # array length cursor = db.inventory.find({"tags": {"$size": 3}}) # embedded documents in an array cursor = db.inventory.find({'instock.0.qty': {"$lte": 20}}) cursor = db.inventory.find({"instock.qty": {"$gt": 10, "$lte": 20}}) # aggregation query = [ # {'$match': {'a': 1}}, { '$group': { '_id': "$name", 'a': { '$sum': 1 }, 'b': { '$max': '$alexa' }, } }, { '$sort': { 'a': pymongo.ASCENDING } }, { '$sort': { 'b': pymongo.DESCENDING } }, { '$limit': 20 }, { '$match': { 'a': 1 } }, ] # select _id, sum(*) as a, max(alexa) as b from xx group by name as _id having a=1 order by a asc, b desc for x in site.aggregate(query): print(x)
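# Why the tutorial above builds the embedded-document filter with SON (sketch, not
# from the original source): an exact subdocument match is order-sensitive, whereas
# dotted-field queries are not. Assumes the `inventory` collection used above.
from bson import SON

exact = {"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}   # matches this field order only
by_field = {"size.h": 14, "size.w": 21, "size.uom": "cm"}      # order-independent
print(inventory.count_documents(exact))
print(inventory.count_documents(by_field))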
"$match": { "when": { "$gte": "$from", "$lte": "$to" }, "workflow.state": "$state" } }, { "$group": { "_id": "$type", "count": { "$sum": 1 } } }, { "$sort": SON([("count", -1)]) }] } } } aggregate_types_discipline = { 'item_title': 'Observation Aggregations by discipline and types', 'url': '{}/aggregate/discipline'.format(BASE_URL), 'datasource': { 'source': RESOURCE_COLLECTION, 'aggregation': { 'pipeline': [{ "$unwind": "$type" }, { "$match": {
# costs.append(neighbor['properties']['price']) # median_costs[doc['properties']['listing_id']]= stats(costs) for doc in collection.find(): i = i + 1 print(i) costs = [] for neighbor in collection.find({ "properties.bedrooms": doc['properties']['bedrooms'], "properties.bathrooms": doc['properties']['bathrooms'], "geometry": SON([("$near", { "$geometry": SON([("type", "Point"), ("coordinates", doc['geometry']['coordinates'])]) })]) }).limit(30): costs.append(neighbor['properties']['price']) median_costs[doc['properties']['listing_id']] = stats( costs, doc['properties']['price']) t2 = time() print("Run Time:", t2 - t1) neighborhood_values = pd.DataFrame.from_dict(median_costs, orient='index') neighborhood_values.columns = [ '0_per_30', '10_per_30', '20_per_30', '30_per_30', '40_per_30', '50_per_30', '60_per_30', '70_per_30', '80_per_30', '90_per_30', '100_per_30', 'median_30', 'mean_30'
def __init__(self, oscontrol, domain, operation, async): SON.__init__(self) self['$type'] = operation self['_domain'] = domain self['_asyncrequest'] = async
class BaseDocument(object): __slots__ = ('_changed_fields', '_initialised', '_created', '_data', '_dynamic_fields', '_auto_id_field', '_db_field_map', '__weakref__') _dynamic = False _dynamic_lock = True STRICT = False def __init__(self, *args, **values): """ Initialise a document or embedded document :param __auto_convert: Try and will cast python objects to Object types :param values: A dictionary of values for the document """ self._initialised = False self._created = True if args: # Combine positional arguments with named arguments. # We only want named arguments. field = iter(self._fields_ordered) # If its an automatic id field then skip to the first defined field if getattr(self, '_auto_id_field', False): next(field) for value in args: name = next(field) if name in values: raise TypeError( 'Multiple values for keyword argument "%s"' % name) values[name] = value __auto_convert = values.pop('__auto_convert', True) # 399: set default values only to fields loaded from DB __only_fields = set(values.pop('__only_fields', values)) _created = values.pop('_created', True) signals.pre_init.send(self.__class__, document=self, values=values) # Check if there are undefined fields supplied to the constructor, # if so raise an Exception. if not self._dynamic and (self._meta.get('strict', True) or _created): _undefined_fields = set( values.keys()) - set(self._fields.keys() + ['id', 'pk', '_cls', '_text_score']) if _undefined_fields: msg = ('The fields "{0}" do not exist on the document "{1}"' ).format(_undefined_fields, self._class_name) raise FieldDoesNotExist(msg) if self.STRICT and not self._dynamic: self._data = StrictDict.create(allowed_keys=self._fields_ordered)() else: self._data = {} self._dynamic_fields = SON() # Assign default values to instance for key, field in self._fields.iteritems(): if self._db_field_map.get(key, key) in __only_fields: continue value = getattr(self, key, None) setattr(self, key, value) if '_cls' not in values: self._cls = self._class_name # Set passed values after initialisation if self._dynamic: dynamic_data = {} for key, value in values.iteritems(): if key in self._fields or key == '_id': setattr(self, key, value) else: dynamic_data[key] = value else: FileField = _import_class('FileField') for key, value in values.iteritems(): key = self._reverse_db_field_map.get(key, key) if key in self._fields or key in ('id', 'pk', '_cls'): if __auto_convert and value is not None: field = self._fields.get(key) if field and not isinstance(field, FileField): value = field.to_python(value) setattr(self, key, value) else: self._data[key] = value # Set any get_<field>_display methods self.__set_field_display() if self._dynamic: self._dynamic_lock = False for key, value in dynamic_data.iteritems(): setattr(self, key, value) # Flag initialised self._initialised = True self._created = _created signals.post_init.send(self.__class__, document=self) def __delattr__(self, *args, **kwargs): """Handle deletions of fields""" field_name = args[0] if field_name in self._fields: default = self._fields[field_name].default if callable(default): default = default() setattr(self, field_name, default) else: super(BaseDocument, self).__delattr__(*args, **kwargs) def __setattr__(self, name, value): # Handle dynamic data only if an initialised dynamic document if self._dynamic and not self._dynamic_lock: if not hasattr(self, name) and not name.startswith('_'): DynamicField = _import_class('DynamicField') field = DynamicField(db_field=name, null=True) field.name = name self._dynamic_fields[name] = field 
self._fields_ordered += (name, ) if not name.startswith('_'): value = self.__expand_dynamic_values(name, value) # Handle marking data as changed if name in self._dynamic_fields: self._data[name] = value if hasattr(self, '_changed_fields'): self._mark_as_changed(name) try: self__created = self._created except AttributeError: self__created = True if (self._is_document and not self__created and name in self._meta.get('shard_key', tuple()) and self._data.get(name) != value): msg = 'Shard Keys are immutable. Tried to update %s' % name raise OperationError(msg) try: self__initialised = self._initialised except AttributeError: self__initialised = False # Check if the user has created a new instance of a class if (self._is_document and self__initialised and self__created and name == self._meta.get('id_field')): super(BaseDocument, self).__setattr__('_created', False) super(BaseDocument, self).__setattr__(name, value) def __getstate__(self): data = {} for k in ('_changed_fields', '_initialised', '_created', '_dynamic_fields', '_fields_ordered'): if hasattr(self, k): data[k] = getattr(self, k) data['_data'] = self.to_mongo() return data def __setstate__(self, data): if isinstance(data['_data'], SON): data['_data'] = self.__class__._from_son(data['_data'])._data for k in ('_changed_fields', '_initialised', '_created', '_data', '_dynamic_fields'): if k in data: setattr(self, k, data[k]) if '_fields_ordered' in data: if self._dynamic: setattr(self, '_fields_ordered', data['_fields_ordered']) else: _super_fields_ordered = type(self)._fields_ordered setattr(self, '_fields_ordered', _super_fields_ordered) dynamic_fields = data.get('_dynamic_fields') or SON() for k in dynamic_fields.keys(): setattr(self, k, data['_data'].get(k)) def __iter__(self): return iter(self._fields_ordered) def __getitem__(self, name): """Dictionary-style field access, return a field's value if present. """ try: if name in self._fields_ordered: return getattr(self, name) except AttributeError: pass raise KeyError(name) def __setitem__(self, name, value): """Dictionary-style field access, set a field's value. """ # Ensure that the field exists before settings its value if not self._dynamic and name not in self._fields: raise KeyError(name) return setattr(self, name, value) def __contains__(self, name): try: val = getattr(self, name) return val is not None except AttributeError: return False def __len__(self): return len(self._data) def __repr__(self): try: u = self.__str__() except (UnicodeEncodeError, UnicodeDecodeError): u = '[Bad Unicode data]' repr_type = str if u is None else type(u) return repr_type('<%s: %s>' % (self.__class__.__name__, u)) def __str__(self): # TODO this could be simpler? if hasattr(self, '__unicode__'): if six.PY3: return self.__unicode__() else: return six.text_type(self).encode('utf-8') return six.text_type('%s object' % self.__class__.__name__) def __eq__(self, other): if isinstance(other, self.__class__) and hasattr( other, 'id') and other.id is not None: return self.id == other.id if isinstance(other, DBRef): return self._get_collection_name( ) == other.collection and self.id == other.id if self.id is None: return self is other return False def __ne__(self, other): return not self.__eq__(other) def clean(self): """ Hook for doing document level data cleaning before validation is run. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. 
""" pass def get_text_score(self): """ Get text score from text query """ if '_text_score' not in self._data: raise InvalidDocumentError( 'This document is not originally built from a text query') return self._data['_text_score'] def to_mongo(self, use_db_field=True, fields=None): """ Return as SON data ready for use with MongoDB. """ if not fields: fields = [] data = SON() data['_id'] = None data['_cls'] = self._class_name # only root fields ['test1.a', 'test2'] => ['test1', 'test2'] root_fields = {f.split('.')[0] for f in fields} for field_name in self: if root_fields and field_name not in root_fields: continue value = self._data.get(field_name, None) field = self._fields.get(field_name) if field is None and self._dynamic: field = self._dynamic_fields.get(field_name) if value is not None: f_inputs = field.to_mongo.__code__.co_varnames ex_vars = {} if fields and 'fields' in f_inputs: key = '%s.' % field_name embedded_fields = [ i.replace(key, '') for i in fields if i.startswith(key) ] ex_vars['fields'] = embedded_fields if 'use_db_field' in f_inputs: ex_vars['use_db_field'] = use_db_field value = field.to_mongo(value, **ex_vars) # Handle self generating fields if value is None and field._auto_gen: value = field.generate() self._data[field_name] = value if (value is not None) or (field.null): if use_db_field: data[field.db_field] = value else: data[field.name] = value # Only add _cls if allow_inheritance is True if not self._meta.get('allow_inheritance'): data.pop('_cls') return data def validate(self, clean=True): """Ensure that all fields' values are valid and that required fields are present. """ # Ensure that each field is matched to a valid value errors = {} if clean: try: self.clean() except ValidationError as error: errors[NON_FIELD_ERRORS] = error # Get a list of tuples of field names and their current values fields = [(self._fields.get(name, self._dynamic_fields.get(name)), self._data.get(name)) for name in self._fields_ordered] EmbeddedDocumentField = _import_class('EmbeddedDocumentField') GenericEmbeddedDocumentField = _import_class( 'GenericEmbeddedDocumentField') for field, value in fields: if value is not None: try: if isinstance( field, (EmbeddedDocumentField, GenericEmbeddedDocumentField)): field._validate(value, clean=clean) else: field._validate(value) except ValidationError as error: errors[field.name] = error.errors or error except (ValueError, AttributeError, AssertionError) as error: errors[field.name] = error elif field.required and not getattr(field, '_auto_gen', False): errors[field.name] = ValidationError('Field is required', field_name=field.name) if errors: pk = 'None' if hasattr(self, 'pk'): pk = self.pk elif self._instance and hasattr(self._instance, 'pk'): pk = self._instance.pk message = 'ValidationError (%s:%s) ' % (self._class_name, pk) raise ValidationError(message, errors=errors) def to_json(self, *args, **kwargs): """Convert this document to JSON. :param use_db_field: Serialize field names as they appear in MongoDB (as opposed to attribute names on this document). Defaults to True. 
""" use_db_field = kwargs.pop('use_db_field', True) return json_util.dumps(self.to_mongo(use_db_field), *args, **kwargs) @classmethod def from_json(cls, json_data, created=False): """Converts json data to a Document instance :param json_data: The json data to load into the Document :param created: If True, the document will be considered as a brand new document If False and an id is provided, it will consider that the data being loaded corresponds to what's already in the database (This has an impact of subsequent call to .save()) If False and no id is provided, it will consider the data as a new document (default ``False``) """ return cls._from_son(json_util.loads(json_data), created=created) def __expand_dynamic_values(self, name, value): """Expand any dynamic values to their correct types / values.""" if not isinstance(value, (dict, list, tuple)): return value # If the value is a dict with '_cls' in it, turn it into a document is_dict = isinstance(value, dict) if is_dict and '_cls' in value: cls = get_document(value['_cls']) return cls(**value) if is_dict: value = { k: self.__expand_dynamic_values(k, v) for k, v in value.items() } else: value = [self.__expand_dynamic_values(name, v) for v in value] # Convert lists / values so we can watch for any changes on them EmbeddedDocumentListField = _import_class('EmbeddedDocumentListField') if (isinstance(value, (list, tuple)) and not isinstance(value, BaseList)): if issubclass(type(self), EmbeddedDocumentListField): value = EmbeddedDocumentList(value, self, name) else: value = BaseList(value, self, name) elif isinstance(value, dict) and not isinstance(value, BaseDict): value = BaseDict(value, self, name) return value def _mark_as_changed(self, key): """Mark a key as explicitly changed by the user.""" if not key: return if not hasattr(self, '_changed_fields'): return if '.' in key: key, rest = key.split('.', 1) key = self._db_field_map.get(key, key) key = '%s.%s' % (key, rest) else: key = self._db_field_map.get(key, key) if key not in self._changed_fields: levels, idx = key.split('.'), 1 while idx <= len(levels): if '.'.join(levels[:idx]) in self._changed_fields: break idx += 1 else: self._changed_fields.append(key) # remove lower level changed fields level = '.'.join(levels[:idx]) + '.' remove = self._changed_fields.remove for field in self._changed_fields[:]: if field.startswith(level): remove(field) def _clear_changed_fields(self): """Using _get_changed_fields iterate and remove any fields that are marked as changed. """ for changed in self._get_changed_fields(): parts = changed.split('.') data = self for part in parts: if isinstance(data, list): try: data = data[int(part)] except IndexError: data = None elif isinstance(data, dict): data = data.get(part, None) else: data = getattr(data, part, None) if not isinstance(data, LazyReference) and hasattr( data, '_changed_fields'): if getattr(data, '_is_document', False): continue data._changed_fields = [] self._changed_fields = [] def _nestable_types_changed_fields(self, changed_fields, base_key, data): """Inspect nested data for changed fields :param changed_fields: Previously collected changed fields :param base_key: The base key that must be used to prepend changes to this data :param data: data to inspect for changes """ # Loop list / dict fields as they contain documents # Determine the iterator to use if not hasattr(data, 'items'): iterator = enumerate(data) else: iterator = data.iteritems() for index_or_key, value in iterator: item_key = '%s%s.' 
% (base_key, index_or_key) # don't check anything lower if this key is already marked # as changed. if item_key[:-1] in changed_fields: continue if hasattr(value, '_get_changed_fields'): changed = value._get_changed_fields() changed_fields += [ '%s%s' % (item_key, k) for k in changed if k ] elif isinstance(value, (list, tuple, dict)): self._nestable_types_changed_fields(changed_fields, item_key, value) def _get_changed_fields(self): """Return a list of all fields that have explicitly been changed. """ EmbeddedDocument = _import_class('EmbeddedDocument') ReferenceField = _import_class('ReferenceField') GenericReferenceField = _import_class('GenericReferenceField') SortedListField = _import_class('SortedListField') changed_fields = [] changed_fields += getattr(self, '_changed_fields', []) for field_name in self._fields_ordered: db_field_name = self._db_field_map.get(field_name, field_name) key = '%s.' % db_field_name data = self._data.get(field_name, None) field = self._fields.get(field_name) if db_field_name in changed_fields: # Whole field already marked as changed, no need to go further continue if isinstance(field, ReferenceField): # Don't follow referenced documents continue if isinstance(data, EmbeddedDocument): # Find all embedded fields that have been changed changed = data._get_changed_fields() changed_fields += ['%s%s' % (key, k) for k in changed if k] elif isinstance(data, (list, tuple, dict)): if (hasattr(field, 'field') and isinstance( field.field, (ReferenceField, GenericReferenceField))): continue elif isinstance(field, SortedListField) and field._ordering: # if ordering is affected whole list is changed if any(field._ordering in d._changed_fields for d in data): changed_fields.append(db_field_name) continue self._nestable_types_changed_fields(changed_fields, key, data) return changed_fields def _delta(self): """Returns the delta (set, unset) of the changes for a document. Gets any values that have been explicitly changed. """ # Handles cases where not loaded from_son but has _id doc = self.to_mongo() set_fields = self._get_changed_fields() unset_data = {} if hasattr(self, '_changed_fields'): set_data = {} # Fetch each set item from its path for path in set_fields: parts = path.split('.') d = doc new_path = [] for p in parts: if isinstance(d, (ObjectId, DBRef)): # Don't dig in the references break elif isinstance(d, list) and p.isdigit(): # An item of a list (identified by its index) is updated d = d[int(p)] elif hasattr(d, 'get'): # dict-like (dict, embedded document) d = d.get(p) new_path.append(p) path = '.'.join(new_path) set_data[path] = d else: set_data = doc if '_id' in set_data: del set_data['_id'] # Determine if any changed items were actually unset. for path, value in set_data.items(): if value or isinstance( value, (numbers.Number, bool)): # Account for 0 and True that are truthy continue parts = path.split('.') if (self._dynamic and len(parts) and parts[0] in self._dynamic_fields): del set_data[path] unset_data[path] = 1 continue # If we've set a value that ain't the default value don't unset it. 
default = None if path in self._fields: default = self._fields[path].default else: # Perform a full lookup for lists / embedded lookups d = self db_field_name = parts.pop() for p in parts: if isinstance(d, list) and p.isdigit(): d = d[int(p)] elif (hasattr(d, '__getattribute__') and not isinstance(d, dict)): real_path = d._reverse_db_field_map.get(p, p) d = getattr(d, real_path) else: d = d.get(p) if hasattr(d, '_fields'): field_name = d._reverse_db_field_map.get( db_field_name, db_field_name) if field_name in d._fields: default = d._fields.get(field_name).default else: default = None if default is not None: default = default() if callable(default) else default if value != default: continue del set_data[path] unset_data[path] = 1 return set_data, unset_data @classmethod def _get_collection_name(cls): """Return the collection name for this class. None for abstract class. """ return cls._meta.get('collection', None) @classmethod def _from_son(cls, son, _auto_dereference=True, only_fields=None, created=False): """Create an instance of a Document (subclass) from a PyMongo SON. """ if not only_fields: only_fields = [] if son and not isinstance(son, dict): raise ValueError( "The source SON object needs to be of type 'dict'") # Get the class name from the document, falling back to the given # class if unavailable class_name = son.get('_cls', cls._class_name) # Convert SON to a data dict, making sure each key is a string and # corresponds to the right db field. data = {} for key, value in son.iteritems(): key = str(key) key = cls._db_field_map.get(key, key) data[key] = value # Return correct subclass for document type if class_name != cls._class_name: cls = get_document(class_name) changed_fields = [] errors_dict = {} fields = cls._fields if not _auto_dereference: fields = copy.deepcopy(fields) for field_name, field in fields.iteritems(): field._auto_dereference = _auto_dereference if field.db_field in data: value = data[field.db_field] try: data[field_name] = (value if value is None else field.to_python(value)) if field_name != field.db_field: del data[field.db_field] except (AttributeError, ValueError) as e: errors_dict[field_name] = e if errors_dict: errors = '\n'.join( ['%s - %s' % (k, v) for k, v in errors_dict.items()]) msg = ('Invalid data to create a `%s` instance.\n%s' % (cls._class_name, errors)) raise InvalidDocumentError(msg) # In STRICT documents, remove any keys that aren't in cls._fields if cls.STRICT: data = {k: v for k, v in data.iteritems() if k in cls._fields} obj = cls(__auto_convert=False, _created=created, __only_fields=only_fields, **data) obj._changed_fields = changed_fields if not _auto_dereference: obj._fields = fields return obj @classmethod def _build_index_specs(cls, meta_indexes): """Generate and merge the full index specs.""" geo_indices = cls._geo_indices() unique_indices = cls._unique_with_indexes() index_specs = [cls._build_index_spec(spec) for spec in meta_indexes] def merge_index_specs(index_specs, indices): """Helper method for merging index specs.""" if not indices: return index_specs # Create a map of index fields to index spec. We're converting # the fields from a list to a tuple so that it's hashable. spec_fields = { tuple(index['fields']): index for index in index_specs } # For each new index, if there's an existing index with the same # fields list, update the existing spec with all data from the # new spec. 
for new_index in indices: candidate = spec_fields.get(tuple(new_index['fields'])) if candidate is None: index_specs.append(new_index) else: candidate.update(new_index) return index_specs # Merge geo indexes and unique_with indexes into the meta index specs. index_specs = merge_index_specs(index_specs, geo_indices) index_specs = merge_index_specs(index_specs, unique_indices) return index_specs @classmethod def _build_index_spec(cls, spec): """Build a PyMongo index spec from a MongoEngine index spec.""" if isinstance(spec, six.string_types): spec = {'fields': [spec]} elif isinstance(spec, (list, tuple)): spec = {'fields': list(spec)} elif isinstance(spec, dict): spec = dict(spec) index_list = [] direction = None # Check to see if we need to include _cls allow_inheritance = cls._meta.get('allow_inheritance') include_cls = (allow_inheritance and not spec.get('sparse', False) and spec.get('cls', True) and '_cls' not in spec['fields']) # 733: don't include cls if index_cls is False unless there is an explicit cls with the index include_cls = include_cls and (spec.get('cls', False) or cls._meta.get('index_cls', True)) if 'cls' in spec: spec.pop('cls') for key in spec['fields']: # If inherited spec continue if isinstance(key, (list, tuple)): continue # ASCENDING from + # DESCENDING from - # TEXT from $ # HASHED from # # GEOSPHERE from ( # GEOHAYSTACK from ) # GEO2D from * direction = pymongo.ASCENDING if key.startswith('-'): direction = pymongo.DESCENDING elif key.startswith('$'): direction = pymongo.TEXT elif key.startswith('#'): direction = pymongo.HASHED elif key.startswith('('): direction = pymongo.GEOSPHERE elif key.startswith(')'): direction = pymongo.GEOHAYSTACK elif key.startswith('*'): direction = pymongo.GEO2D if key.startswith(('+', '-', '*', '$', '#', '(', ')')): key = key[1:] # Use real field name, do it manually because we need field # objects for the next part (list field checking) parts = key.split('.') if parts in (['pk'], ['id'], ['_id']): key = '_id' else: fields = cls._lookup_field(parts) parts = [] for field in fields: try: if field != '_id': field = field.db_field except AttributeError: pass parts.append(field) key = '.'.join(parts) index_list.append((key, direction)) # Don't add cls to a geo index if include_cls and direction not in (pymongo.GEO2D, pymongo.GEOHAYSTACK, pymongo.GEOSPHERE): index_list.insert(0, ('_cls', 1)) if index_list: spec['fields'] = index_list return spec @classmethod def _unique_with_indexes(cls, namespace=''): """Find unique indexes in the document schema and return them.""" unique_indexes = [] for field_name, field in cls._fields.items(): sparse = field.sparse # Generate a list of indexes needed by uniqueness constraints if field.unique: unique_fields = [field.db_field] # Add any unique_with fields to the back of the index spec if field.unique_with: if isinstance(field.unique_with, six.string_types): field.unique_with = [field.unique_with] # Convert unique_with field names to real field names unique_with = [] for other_name in field.unique_with: parts = other_name.split('.') # Lookup real name parts = cls._lookup_field(parts) name_parts = [part.db_field for part in parts] unique_with.append('.'.join(name_parts)) # Unique field should be required parts[-1].required = True sparse = (not sparse and parts[-1].name not in cls.__dict__) unique_fields += unique_with # Add the new index to the list fields = [('%s%s' % (namespace, f), pymongo.ASCENDING) for f in unique_fields] index = {'fields': fields, 'unique': True, 'sparse': sparse} 
unique_indexes.append(index) if field.__class__.__name__ == 'ListField': field = field.field # Grab any embedded document field unique indexes if (field.__class__.__name__ == 'EmbeddedDocumentField' and field.document_type != cls): field_namespace = '%s.' % field_name doc_cls = field.document_type unique_indexes += doc_cls._unique_with_indexes(field_namespace) return unique_indexes @classmethod def _geo_indices(cls, inspected=None, parent_field=None): inspected = inspected or [] geo_indices = [] inspected.append(cls) geo_field_type_names = ('EmbeddedDocumentField', 'GeoPointField', 'PointField', 'LineStringField', 'PolygonField') geo_field_types = tuple( [_import_class(field) for field in geo_field_type_names]) for field in cls._fields.values(): if not isinstance(field, geo_field_types): continue if hasattr(field, 'document_type'): field_cls = field.document_type if field_cls in inspected: continue if hasattr(field_cls, '_geo_indices'): geo_indices += field_cls._geo_indices( inspected, parent_field=field.db_field) elif field._geo_index: field_name = field.db_field if parent_field: field_name = '%s.%s' % (parent_field, field_name) geo_indices.append( {'fields': [(field_name, field._geo_index)]}) return geo_indices @classmethod def _lookup_field(cls, parts): """Given the path to a given field, return a list containing the Field object associated with that field and all of its parent Field objects. Args: parts (str, list, or tuple) - path to the field. Should be a string for simple fields existing on this document or a list of strings for a field that exists deeper in embedded documents. Returns: A list of Field instances for fields that were found or strings for sub-fields that weren't. Example: >>> user._lookup_field('name') [<mongoengine.fields.StringField at 0x1119bff50>] >>> user._lookup_field('roles') [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>] >>> user._lookup_field(['roles', 'role']) [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>, <mongoengine.fields.StringField at 0x1119ec050>] >>> user._lookup_field('doesnt_exist') raises LookUpError >>> user._lookup_field(['roles', 'doesnt_exist']) [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>, 'doesnt_exist'] """ # TODO this method is WAY too complicated. Simplify it. 
# TODO don't think returning a string for embedded non-existent fields is desired ListField = _import_class('ListField') DynamicField = _import_class('DynamicField') if not isinstance(parts, (list, tuple)): parts = [parts] fields = [] field = None for field_name in parts: # Handle ListField indexing: if field_name.isdigit() and isinstance(field, ListField): fields.append(field_name) continue # Look up first field from the document if field is None: if field_name == 'pk': # Deal with "primary key" alias field_name = cls._meta['id_field'] if field_name in cls._fields: field = cls._fields[field_name] elif cls._dynamic: field = DynamicField(db_field=field_name) elif cls._meta.get('allow_inheritance') or cls._meta.get( 'abstract', False): # 744: in case the field is defined in a subclass for subcls in cls.__subclasses__(): try: field = subcls._lookup_field([field_name])[0] except LookUpError: continue if field is not None: break else: raise LookUpError('Cannot resolve field "%s"' % field_name) else: raise LookUpError('Cannot resolve field "%s"' % field_name) else: ReferenceField = _import_class('ReferenceField') GenericReferenceField = _import_class('GenericReferenceField') # If previous field was a reference, throw an error (we # cannot look up fields that are on references). if isinstance(field, (ReferenceField, GenericReferenceField)): raise LookUpError('Cannot perform join in mongoDB: %s' % '__'.join(parts)) # If the parent field has a "field" attribute which has a # lookup_member method, call it to find the field # corresponding to this iteration. if hasattr(getattr(field, 'field', None), 'lookup_member'): new_field = field.field.lookup_member(field_name) # If the parent field is a DynamicField or if it's part of # a DynamicDocument, mark current field as a DynamicField # with db_name equal to the field name. elif cls._dynamic and (isinstance(field, DynamicField) or getattr( getattr(field, 'document_type', None), '_dynamic', None)): new_field = DynamicField(db_field=field_name) # Else, try to use the parent field's lookup_member method # to find the subfield. elif hasattr(field, 'lookup_member'): new_field = field.lookup_member(field_name) # Raise a LookUpError if all the other conditions failed. else: raise LookUpError('Cannot resolve subfield or operator {} ' 'on the field {}'.format( field_name, field.name)) # If current field still wasn't found and the parent field # is a ComplexBaseField, add the name current field name and # move on. if not new_field and isinstance(field, ComplexBaseField): fields.append(field_name) continue elif not new_field: raise LookUpError('Cannot resolve field "%s"' % field_name) field = new_field # update field to the new field type fields.append(field) return fields @classmethod def _translate_field_name(cls, field, sep='.'): """Translate a field attribute name to a database field name. """ parts = field.split(sep) parts = [f.db_field for f in cls._lookup_field(parts)] return '.'.join(parts) def __set_field_display(self): """For each field that specifies choices, create a get_<field>_display method. 
""" fields_with_choices = [(n, f) for n, f in self._fields.items() if f.choices] for attr_name, field in fields_with_choices: setattr(self, 'get_%s_display' % attr_name, partial(self.__get_field_display, field=field)) def __get_field_display(self, field): """Return the display value for a choice field""" value = getattr(self, field.name) if field.choices and isinstance(field.choices[0], (list, tuple)): if value is None: return None sep = getattr(field, 'display_sep', ' ') values = value if field.__class__.__name__ in ( 'ListField', 'SortedListField') else [value] return sep.join([ six.text_type(dict(field.choices).get(val, val)) for val in values or [] ]) return value
def evaluate(cls, config, ctrl): time_limit = time.time() + 60 * 60 # 1hr from now rval = SON(dbn_train_fn_version=1) ctrl.info('starting dbn_train_fn') kv = config.items() kv.sort() for k, v in kv: ctrl.info('key=%s\t%s' % (k, str(v))) rng = numpy.random.RandomState(config['iseed']) s_rng = RandomStreams(int(rng.randint(2**30))) dataset, train_Xy, valid_Xy, test_Xy = preprocess_data(config, ctrl) # allocate learning function parameters s_inputs_all = tensor.fmatrix('inputs') s_labels_all = tensor.ivector('labels') s_idx = tensor.lscalar('batch_idx') s_batchsize = tensor.lscalar('batch_size') s_low = s_idx * s_batchsize s_high = s_low + s_batchsize s_inputs = s_inputs_all[s_low:s_high] s_labels = s_labels_all[s_low:s_high] s_lr = tensor.scalar('lr') s_features = s_inputs # s_features will be modified in the model-building loop weights = [] vbiases = [] hbiases = [] n_inputs_i = valid_Xy[0].get_value(borrow=True).shape[1] rval['cd_reports'] = [] try: layer_config = config['next_layer'] # allocate model parameters while layer_config: i = len(rval['cd_reports']) n_hid_i = layer_config['n_hid'] if layer_config['W_init_dist'] == 'uniform': W = rng.uniform(low=-1, high=1, size=(n_hid_i, n_inputs_i)).T.astype('float32') elif layer_config['W_init_dist'] == 'normal': W = rng.randn(n_hid_i, n_inputs_i).T.astype('float32') else: raise ValueError('W_init_dist', layer_config['W_init_dist']) if layer_config['W_init_algo'] == 'old': #N.B. the weights are transposed so that as the number of hidden units changes, # the first hidden units are always the same vectors. # this makes it easier to isolate the effect of random initialization # from the other hyper-parameters under review W *= layer_config[ 'W_init_algo_old_multiplier'] / numpy.sqrt(n_inputs_i) elif layer_config['W_init_algo'] == 'Xavier': W *= numpy.sqrt(6.0 / (n_inputs_i + n_hid_i)) else: raise ValueError(layer_config['W_init_algo']) layer_idx = len(rval['cd_reports']) weights.append(theano.shared(W, 'W_%i' % layer_idx)) hbiases.append( theano.shared(numpy.zeros(n_hid_i, dtype='float32'), 'h_%i' % layer_idx)) vbiases.append( theano.shared(numpy.zeros(n_inputs_i, dtype='float32'), 'v_%i' % layer_idx)) del W # allocate RBM training function for this layer # this version re-calculates the training set every time # TODO: cache the training set for each layer # TODO: consider sparsity? # TODO: consider momentum? 
if layer_config['cd_epochs']: cd_report = train_rbm( s_rng, s_idx, s_batchsize, s_features, W=weights[-1], vbias=vbiases[-1], hbias=hbiases[-1], n_in=n_inputs_i, n_hid=n_hid_i, batchsize=layer_config['cd_batchsize'], sample_v0s=layer_config['cd_sample_v0s'], cdlr=layer_config['cd_lr'] / float(layer_config['cd_batchsize']), n_epochs=layer_config['cd_epochs'], n_batches_per_epoch=dataset.descr['n_train'] // layer_config['cd_batchsize'], lr_anneal_start=layer_config['cd_lr_anneal_start'], givens={ s_inputs_all: tensor.as_tensor_variable(train_Xy[0]) }, time_limit=time_limit) else: cd_report = None rval['cd_reports'].append(cd_report) # update s_features to point to top layer s_features = tensor.nnet.sigmoid( tensor.dot(s_features, weights[-1]) + hbiases[-1]) n_inputs_i = n_hid_i layer_config = layer_config.get('next_layer', None) except (MemoryError, ): rval['abort'] = 'MemoryError' rval['status'] = 'ok' rval['loss'] = 1.0 rval['best_epoch_valid'] = 0.0 return rval # allocate model logreg = LogisticRegression.new(s_features, n_in=n_inputs_i, n_out=dataset.descr['n_classes']) traincost = logreg.nll(s_labels).mean() def ssq(X): return (X**2).sum() traincost = traincost + config['l2_penalty'] * ( sum([ssq(w_i) for w_i in weights]) + ssq(logreg.w)) # params = weights+hbiases+vbiases+logreg.params # vbiases are not involved in the supervised network params = weights + hbiases + logreg.params train_logreg_fn = theano.function( [s_idx, s_lr], [logreg.nll(s_labels).mean()], updates=sgd_updates(params=params, grads=tensor.grad(traincost, params), stepsizes=[s_lr] * len(params)), givens={ s_batchsize: config['batchsize'], s_inputs_all: tensor.as_tensor_variable(train_Xy[0]), s_labels_all: train_Xy[1] }) valid_logreg_fn = theano.function( [s_idx], logreg.errors(s_labels).mean(), givens={ s_batchsize: config['batchsize'], s_inputs_all: tensor.as_tensor_variable(valid_Xy[0]), s_labels_all: valid_Xy[1] }) test_logreg_fn = theano.function( [s_idx], logreg.errors(s_labels).mean(), givens={ s_batchsize: config['batchsize'], s_inputs_all: tensor.as_tensor_variable(test_Xy[0]), s_labels_all: test_Xy[1] }) rval['best_epoch'] = -1 rval['best_epoch_valid'] = -1 rval['best_epoch_train'] = -1 rval['best_epoch_test'] = -1 rval['status'] = 'ok' valid_rate = -1 test_rate = -1 train_rate = -1 n_train_batches = dataset.descr['n_train'] // config['batchsize'] n_valid_batches = dataset.descr['n_valid'] // config['batchsize'] n_test_batches = dataset.descr['n_test'] // config['batchsize'] n_iters = 0 for epoch in xrange(config['sup_max_epochs']): e_lr = config['lr'] e_lr *= min(1, config['lr_anneal_start'] / float(n_iters + 1)) #anneal learning rate valid_rate = float(1 - numpy.mean( [valid_logreg_fn(i) for i in range(n_valid_batches)])) valid_rate_std_thresh = 0.5 * numpy.sqrt( valid_rate * (1 - valid_rate) / (n_valid_batches * config['batchsize'])) if valid_rate > (rval['best_epoch_valid'] + valid_rate_std_thresh): rval['best_epoch'] = epoch rval['best_epoch_test'] = test_rate rval['best_epoch_valid'] = valid_rate rval['best_epoch_train'] = train_rate best_params = copy.deepcopy(params) logger.info( 'Epoch=%i best epoch %i valid %f test %f best_epoch_train %f prev_train %f' % (epoch, rval['best_epoch'], rval['best_epoch_valid'], rval['best_epoch_test'], rval['best_epoch_train'], train_rate)) #ctrl.info('Epoch %i train nll: %f'%(epoch, train_rate)) ctrl.checkpoint(rval) if epoch > config[ 'sup_min_epochs'] and epoch > 2 * rval['best_epoch']: break if time.time() > time_limit: break train_rate = float( numpy.mean([ 
train_logreg_fn(i, e_lr) for i in range(n_train_batches) ])) if not numpy.isfinite(train_rate): do_test = False rval['status'] = 'fail' rval['status_info'] = 'train_rate %f' % train_rate break n_iters += 1 do_test = 1 if do_test and rval['status'] == 'ok': # copy best params back into place for p, bp in zip(params, best_params): p.set_value(bp.get_value()) rval['best_epoch_test'] = 1 - float( numpy.mean([test_logreg_fn(i) for i in range(n_test_batches)])) rval['loss'] = 1.0 - rval['best_epoch_valid'] ctrl.info('rval: %s' % str(rval)) return rval
'versioning': False, 'resource_methods': ['GET', 'POST', 'DELETE'], 'item_methods': ['GET', 'PATCH', 'DELETE'], 'mongo_indexes': {'attr': ([('key', 1), ('id', 1)], {'background': True}), 'icao': ([('icao', 1)], {'background': True}), 'etag': ([('_etag', 1)], {'background': True, 'unique': True}), 'content': ([('descr', 'text'), ('label', 'text')], {'background': True}), 'rit_version': ([('rit_version', 1)], {'background': True}) }, 'allow_unknown': True, 'schema': _schema } from bson import SON, ObjectId agg_count_keys = { 'url': 'e5x/choices/keys/count', 'item_title': 'E5X choices count', 'pagination': False, 'datasource': { 'source': RESOURCE_COLLECTION, 'aggregation': { 'pipeline': [ {"$group": {"_id": {"key": "$key"}, "count": {"$sum": 1}}}, {"$sort": SON([("count", -1), ("_id", -1)])} ] } } }
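# Roughly what the Eve aggregation endpoint above executes, expressed directly with
# PyMongo as a sketch; the database and collection names are placeholders standing in
# for RESOURCE_COLLECTION.
from bson import SON
from pymongo import MongoClient

coll = MongoClient()['mydb']['e5x_choices']
pipeline = [
    {"$group": {"_id": {"key": "$key"}, "count": {"$sum": 1}}},
    {"$sort": SON([("count", -1), ("_id", -1)])},
]
for row in coll.aggregate(pipeline):
    print(row)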
def __init__(self, *args, **values): """ Initialise a document or an embedded document. :param values: A dictionary of keys and values for the document. It may contain additional reserved keywords, e.g. "__auto_convert". :param __auto_convert: If True, supplied values will be converted to Python-type values via each field's `to_python` method. :param _created: Indicates whether this is a brand new document or whether it's already been persisted before. Defaults to true. """ self._initialised = False self._created = True if args: raise TypeError( "Instantiating a document with positional arguments is not " "supported. Please use `field_name=value` keyword arguments.") __auto_convert = values.pop("__auto_convert", True) _created = values.pop("_created", True) signals.pre_init.send(self.__class__, document=self, values=values) # Check if there are undefined fields supplied to the constructor, # if so raise an Exception. if not self._dynamic and (self._meta.get("strict", True) or _created): _undefined_fields = set(values.keys()) - set( list(self._fields.keys()) + ["id", "pk", "_cls", "_text_score"]) if _undefined_fields: msg = f'The fields "{_undefined_fields}" do not exist on the document "{self._class_name}"' raise FieldDoesNotExist(msg) if self.STRICT and not self._dynamic: self._data = StrictDict.create(allowed_keys=self._fields_ordered)() else: self._data = {} self._dynamic_fields = SON() # Assign default values for fields # not set in the constructor for field_name in self._fields: if field_name in values: continue value = getattr(self, field_name, None) setattr(self, field_name, value) if "_cls" not in values: self._cls = self._class_name # Set actual values dynamic_data = {} FileField = _import_class("FileField") for key, value in values.items(): field = self._fields.get(key) if field or key in ("id", "pk", "_cls"): if __auto_convert and value is not None: if field and not isinstance(field, FileField): value = field.to_python(value) setattr(self, key, value) else: if self._dynamic: dynamic_data[key] = value else: # For strict Document self._data[key] = value # Set any get_<field>_display methods self.__set_field_display() if self._dynamic: self._dynamic_lock = False for key, value in dynamic_data.items(): setattr(self, key, value) # Flag initialised self._initialised = True self._created = _created signals.post_init.send(self.__class__, document=self)
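# Behavioural sketch of the constructor above (not from the original source), assuming
# a mongoengine release that ships this keyword-only __init__; the Person class is
# illustrative.
import mongoengine as me

class Person(me.Document):
    name = me.StringField()

Person(name='Ada')              # keyword arguments are fine
try:
    Person('Ada')               # positional arguments now raise TypeError
except TypeError as exc:
    print(exc)
try:
    Person(nickname='Ada')      # undefined field -> FieldDoesNotExist
except me.FieldDoesNotExist as exc:
    print(exc)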
def get_list_distinct_approval_year_and_period(args): ret = {} collection = common.get_collection('TMPER_AprPeriod').aggregate([{ "$lookup": { 'from': common.get_collection_name_with_schema('SYS_VW_ValueList'), 'localField': 'apr_period', 'foreignField': 'value', 'as': 'aprPeriod' } }, { "$unwind": { 'path': '$aprPeriod', "preserveNullAndEmptyArrays": True } }, { "$match": { "$and": [{ 'aprPeriod.list_name': "LApprovalPeriod" }, { 'aprPeriod.language': quicky.language.get_language() }] } }, { "$project": { "apr_period_a": { "$ifNull": ["$aprPeriod.caption", ""] }, "apr_year_a": { '$toString': "$apr_year" }, "apr_period": { '$toString': "$apr_period" }, "apr_year": 1 } }, { "$project": { "caption": { '$concat': ["$apr_year_a", " / ", "$apr_period_a"] }, "value": { '$concat': ["$apr_year_a", "__", "$apr_period"] }, "apr_year": 1, "apr_period": { '$toInt': "$apr_period" }, } }, { "$sort": SON([("apr_year", -1), ("apr_period", -1)]) }]) ret = list(collection) ret1 = [] if (args['data'] is not None and 'apr_period' in args['data'] and 'apr_year' in args['data']): for x in ret: if (x['apr_period'] == args['data']['apr_period'] and x['apr_year'] == args['data']['apr_year']): continue else: collection1 = common.get_collection( 'TMPER_AprPeriodEmpOut').aggregate([ { "$match": { "$and": [{ 'apr_year': x['apr_year'] }, { 'apr_period': x['apr_period'] }] } }, ]) if (len(list(collection1)) == 0): ret1.append(x) item_list = [e for e in ret if e not in ret1] return item_list
def query(_doc_cls=None, **kwargs): """Transform a query from Django-style format to Mongo format.""" mongo_query = {} merge_query = defaultdict(list) for key, value in sorted(kwargs.items()): if key == "__raw__": mongo_query.update(value) continue parts = key.rsplit("__") indices = [(i, p) for i, p in enumerate(parts) if p.isdigit()] parts = [part for part in parts if not part.isdigit()] # Check for an operator and transform to mongo-style if there is op = None if len(parts) > 1 and parts[-1] in MATCH_OPERATORS: op = parts.pop() # Allow to escape operator-like field name by __ if len(parts) > 1 and parts[-1] == "": parts.pop() negate = False if len(parts) > 1 and parts[-1] == "not": parts.pop() negate = True if _doc_cls: # Switch field names to proper names [set in Field(name='foo')] try: fields = _doc_cls._lookup_field(parts) except Exception as e: raise InvalidQueryError(e) parts = [] CachedReferenceField = _import_class("CachedReferenceField") GenericReferenceField = _import_class("GenericReferenceField") cleaned_fields = [] for field in fields: append_field = True if isinstance(field, six.string_types): parts.append(field) append_field = False # is last and CachedReferenceField elif isinstance(field, CachedReferenceField) and fields[-1] == field: parts.append("%s._id" % field.db_field) else: parts.append(field.db_field) if append_field: cleaned_fields.append(field) # Convert value to proper value field = cleaned_fields[-1] singular_ops = [None, "ne", "gt", "gte", "lt", "lte", "not"] singular_ops += STRING_OPERATORS if op in singular_ops: value = field.prepare_query_value(op, value) if isinstance(field, CachedReferenceField) and value: value = value["_id"] elif op in ("in", "nin", "all", "near") and not isinstance(value, dict): # Raise an error if the in/nin/all/near param is not iterable. value = _prepare_query_for_iterable(field, op, value) # If we're querying a GenericReferenceField, we need to alter the # key depending on the value: # * If the value is a DBRef, the key should be "field_name._ref". # * If the value is an ObjectId, the key should be "field_name._ref.$id". 
if isinstance(field, GenericReferenceField): if isinstance(value, DBRef): parts[-1] += "._ref" elif isinstance(value, ObjectId): parts[-1] += "._ref.$id" # if op and op not in COMPARISON_OPERATORS: if op: if op in GEO_OPERATORS: value = _geo_operator(field, op, value) elif op in ("match", "elemMatch"): ListField = _import_class("ListField") EmbeddedDocumentField = _import_class("EmbeddedDocumentField") if (isinstance(value, dict) and isinstance(field, ListField) and isinstance(field.field, EmbeddedDocumentField)): value = query(field.field.document_type, **value) else: value = field.prepare_query_value(op, value) value = {"$elemMatch": value} elif op in CUSTOM_OPERATORS: NotImplementedError('Custom method "%s" has not ' "been implemented" % op) elif op not in STRING_OPERATORS: value = {"$" + op: value} if negate: value = {"$not": value} for i, part in indices: parts.insert(i, part) key = ".".join(parts) if key not in mongo_query: mongo_query[key] = value else: if isinstance(mongo_query[key], dict) and isinstance(value, dict): mongo_query[key].update(value) # $max/minDistance needs to come last - convert to SON value_dict = mongo_query[key] if ("$maxDistance" in value_dict or "$minDistance" in value_dict) and ("$near" in value_dict or "$nearSphere" in value_dict): value_son = SON() for k, v in iteritems(value_dict): if k == "$maxDistance" or k == "$minDistance": continue value_son[k] = v # Required for MongoDB >= 2.6, may fail when combining # PyMongo 3+ and MongoDB < 2.6 near_embedded = False for near_op in ("$near", "$nearSphere"): if isinstance(value_dict.get(near_op), dict): value_son[near_op] = SON(value_son[near_op]) if "$maxDistance" in value_dict: value_son[near_op][ "$maxDistance"] = value_dict[ "$maxDistance"] if "$minDistance" in value_dict: value_son[near_op][ "$minDistance"] = value_dict[ "$minDistance"] near_embedded = True if not near_embedded: if "$maxDistance" in value_dict: value_son["$maxDistance"] = value_dict[ "$maxDistance"] if "$minDistance" in value_dict: value_son["$minDistance"] = value_dict[ "$minDistance"] mongo_query[key] = value_son else: # Store for manually merging later merge_query[key].append(value) # The queryset has been filter in such a way we must manually merge for k, v in merge_query.items(): merge_query[k].append(mongo_query[k]) del mongo_query[k] if isinstance(v, list): value = [{k: val} for val in v] if "$and" in mongo_query.keys(): mongo_query["$and"].extend(value) else: mongo_query["$and"] = value return mongo_query
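The raw filter produced by this transform is visible on a queryset's private `_query` attribute; a rough sketch, assuming mongoengine and a hypothetical `Person` document:

from mongoengine import Document, StringField, IntField, connect

class Person(Document):
    name = StringField()
    age = IntField()

connect("demo")  # illustrative only; building a queryset does not hit the server
qs = Person.objects(age__gte=18, name__in=["Ann", "Bob"])
print(qs._query)
# Roughly: {'age': {'$gte': 18}, 'name': {'$in': ['Ann', 'Bob']}}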
def build_itemsets_by_day(distance, undefined=True, only_tourists=True): hash_table = dict() count = 0 f = open('DatasetBuilding/input/is_userdays.txt', mode='w') g = open('DatasetBuilding/translation_tables/is_userdays_tt', mode='w') q1 = {'touristLocal': 'tourist'} q2 = {'$or': [{'touristLocal': 'tourist'}, {'touristLocal': 'undefined'}]} if only_tourists: query = q1 else: query = q2 for tourist in ucol.find(query): tweets = [] item_set = set() prevDate = None for sequence in scol.find({ 'user_id': tourist['id'] }).sort('serial', pymongo.ASCENDING): for id in sequence['sequence']: for tweet in tcol.find({'id': id}): currentDate = datetime.datetime.strptime( tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y') if prevDate is None: prevDate = currentDate if (currentDate.date() - prevDate.date()).days > 0: f.write(" ".join(list(map(str, sorted(item_set))))) f.write("\n") item_set = set() #print(currentDate) tweets.append(tweet) lon = tweet['coordinates']['coordinates'][0] lat = tweet['coordinates']['coordinates'][1] queryNear = { 'coordinates': SON([('$near', { "type": "Point", "coordinates": [lon, lat] }), ('$maxDistance', distance)]) } close_pois = pcol.find(queryNear).limit(1) closest_poi = 'UNDEFINED' if close_pois.count() > 0: closest_poi = close_pois.next()['name'] else: if not undefined: continue code = hash_table.get(closest_poi, 0) if code == 0: count += 1 hash_table[closest_poi] = count code = count item_set.add(code) if len(item_set) > 0: f.write(" ".join(list(map(str, sorted(item_set))))) f.write("\n") aux = [] for key, value in hash_table.items(): aux += [(key, value)] sorted_aux = sorted(aux, key=lambda tup: tup[1]) for item in sorted_aux: g.write(str(item[1]) + "\t" + item[0] + "\n")
agg_count_types = { 'url': 'organizations/types/count', 'item_title': 'Organizations Types Count', 'pagination': False, 'datasource': { 'source': RESOURCE_COLLECTION, 'aggregation': { 'pipeline': [{ "$group": { "_id": "$type_id", "count": { "$sum": 1 } } }, { "$sort": SON([("count", -1), ("_id", -1)]) }] } } } agg_parents = { 'item_title': 'Content Parents Aggregation', 'pagination': False, 'datasource': { 'source': RESOURCE_COLLECTION, 'aggregation': { 'pipeline': [ { "$match": { "id": "$start_id",
def default(obj, json_options=DEFAULT_JSON_OPTIONS): # We preserve key order when rendering SON, DBRef, etc. as JSON by # returning a SON for those types instead of a dict. if isinstance(obj, ObjectId): return {"$oid": str(obj)} if isinstance(obj, DBRef): return _json_convert(obj.as_doc(), json_options=json_options) if isinstance(obj, datetime.datetime): if (json_options.datetime_representation == DatetimeRepresentation.ISO8601): if not obj.tzinfo: obj = obj.replace(tzinfo=utc) if obj >= EPOCH_AWARE: off = obj.tzinfo.utcoffset(obj) if (off.days, off.seconds, off.microseconds) == (0, 0, 0): tz_string = 'Z' else: tz_string = obj.strftime('%z') millis = int(obj.microsecond / 1000) fracsecs = ".%03d" % (millis,) if millis else "" return {"$date": "%s%s%s" % ( obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string)} millis = bson._datetime_to_millis(obj) if (json_options.datetime_representation == DatetimeRepresentation.LEGACY): return {"$date": millis} return {"$date": {"$numberLong": str(millis)}} if json_options.strict_number_long and isinstance(obj, Int64): return {"$numberLong": str(obj)} if isinstance(obj, (RE_TYPE, Regex)): flags = "" if obj.flags & re.IGNORECASE: flags += "i" if obj.flags & re.LOCALE: flags += "l" if obj.flags & re.MULTILINE: flags += "m" if obj.flags & re.DOTALL: flags += "s" if obj.flags & re.UNICODE: flags += "u" if obj.flags & re.VERBOSE: flags += "x" if isinstance(obj.pattern, str): pattern = obj.pattern else: pattern = obj.pattern.decode('utf-8') if json_options.json_mode == JSONMode.LEGACY: return SON([("$regex", pattern), ("$options", flags)]) return {'$regularExpression': SON([("pattern", pattern), ("options", flags)])} if isinstance(obj, MinKey): return {"$minKey": 1} if isinstance(obj, MaxKey): return {"$maxKey": 1} if isinstance(obj, Timestamp): return {"$timestamp": SON([("t", obj.time), ("i", obj.inc)])} if isinstance(obj, Code): if obj.scope is None: return {'$code': str(obj)} return SON([ ('$code', str(obj)), ('$scope', _json_convert(obj.scope, json_options))]) if isinstance(obj, Binary): return _encode_binary(obj, obj.subtype, json_options) if isinstance(obj, bytes): return _encode_binary(obj, 0, json_options) if isinstance(obj, uuid.UUID): if json_options.strict_uuid: binval = Binary.from_uuid( obj, uuid_representation=json_options.uuid_representation) return _encode_binary(binval, binval.subtype, json_options) else: return {"$uuid": obj.hex} if isinstance(obj, Decimal128): return {"$numberDecimal": str(obj)} if isinstance(obj, bool): return obj if (json_options.json_mode == JSONMode.CANONICAL and isinstance(obj, int)): if -2 ** 31 <= obj < 2 ** 31: return {'$numberInt': str(obj)} return {'$numberLong': str(obj)} if json_options.json_mode != JSONMode.LEGACY and isinstance(obj, float): if math.isnan(obj): return {'$numberDouble': 'NaN'} elif math.isinf(obj): representation = 'Infinity' if obj > 0 else '-Infinity' return {'$numberDouble': representation} elif json_options.json_mode == JSONMode.CANONICAL: # repr() will return the shortest string guaranteed to produce the # original value, when float() is called on it. return {'$numberDouble': str(repr(obj))} raise TypeError("%r is not JSON serializable" % obj)
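This `default` hook is what `bson.json_util.dumps` falls back to for types JSON cannot encode natively; a small sketch of the observable behaviour:

import datetime
from bson import ObjectId
from bson.json_util import dumps, CANONICAL_JSON_OPTIONS

doc = {"_id": ObjectId("5f1d7f0c2c8e4b6a9c8b4567"),
       "when": datetime.datetime(2020, 1, 1),
       "n": 42}
print(dumps(doc))                                       # legacy extended JSON
print(dumps(doc, json_options=CANONICAL_JSON_OPTIONS))  # canonical mode ($numberInt, $date as $numberLong)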
def test_update(self): self.coll.insert([{}, {}]) bulk = self.coll.initialize_ordered_bulk_op() # update() requires find() first. self.assertRaises(AttributeError, lambda: bulk.update({'$set': { 'x': 1 }})) self.assertRaises(TypeError, bulk.find({}).update, 1) self.assertRaises(ValueError, bulk.find({}).update, {}) # All fields must be $-operators. self.assertRaises(ValueError, bulk.find({}).update, {'foo': 'bar'}) bulk.find({}).update({'$set': {'foo': 'bar'}}) result = bulk.execute() self.assertEqualResponse( { 'nMatched': 2, 'nModified': 2, 'nUpserted': 0, 'nInserted': 0, 'nRemoved': 0, 'upserted': [], 'writeErrors': [], 'writeConcernErrors': [] }, result) self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 2) # All fields must be $-operators -- validated server-side. bulk = self.coll.initialize_ordered_bulk_op() updates = SON([('$set', {'x': 1}), ('y', 1)]) bulk.find({}).update(updates) self.assertRaises(BulkWriteError, bulk.execute) self.coll.remove() self.coll.insert([{}, {}]) bulk = self.coll.initialize_unordered_bulk_op() bulk.find({}).update({'$set': {'bim': 'baz'}}) result = bulk.execute() self.assertEqualResponse( { 'nMatched': 2, 'nModified': 2, 'nUpserted': 0, 'nInserted': 0, 'nRemoved': 0, 'upserted': [], 'writeErrors': [], 'writeConcernErrors': [] }, result) self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 2) self.coll.insert({'x': 1}) bulk = self.coll.initialize_unordered_bulk_op() bulk.find({'x': 1}).update({'$set': {'x': 42}}) result = bulk.execute() self.assertEqualResponse( { 'nMatched': 1, 'nModified': 1, 'nUpserted': 0, 'nInserted': 0, 'nRemoved': 0, 'upserted': [], 'writeErrors': [], 'writeConcernErrors': [] }, result) self.assertEqual(1, self.coll.find({'x': 42}).count()) # Second time, x is already 42 so nModified is 0. bulk = self.coll.initialize_unordered_bulk_op() bulk.find({'x': 42}).update({'$set': {'x': 42}}) result = bulk.execute() self.assertEqualResponse( { 'nMatched': 1, 'nModified': 0, 'nUpserted': 0, 'nInserted': 0, 'nRemoved': 0, 'upserted': [], 'writeErrors': [], 'writeConcernErrors': [] }, result)
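`initialize_ordered_bulk_op` / `initialize_unordered_bulk_op` are the legacy pymongo bulk builders exercised by this test; a rough modern equivalent of the first update, assuming pymongo 3+ and hypothetical database/collection names:

from pymongo import MongoClient, UpdateMany

coll = MongoClient()["test_db"]["test_coll"]
coll.insert_many([{}, {}])
result = coll.bulk_write([UpdateMany({}, {"$set": {"foo": "bar"}})], ordered=True)
print(result.matched_count, result.modified_count)  # 2 2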
def add_sorting(self, order_by: list): ordering = [(field, 1) for field in order_by] sorting = {"$sort": SON(ordering)} return sorting
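A short usage sketch for the helper above; the `SON` wrapping keeps the sort keys in the order they were passed once the stage is encoded to BSON:

from bson.son import SON

# add_sorting(["last_name", "first_name"]) would return:
stage = {"$sort": SON([("last_name", 1), ("first_name", 1)])}
# appended to a pipeline, this sorts by last_name first, then first_name
pipeline = [{"$match": {"active": True}}, stage]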
def query(_doc_cls=None, _field_operation=False, **query): """Transform a query from Django-style format to Mongo format. """ mongo_query = {} merge_query = defaultdict(list) for key, value in sorted(query.items()): if key == "__raw__": mongo_query.update(value) continue parts = key.split('__') indices = [(i, p) for i, p in enumerate(parts) if p.isdigit()] parts = [part for part in parts if not part.isdigit()] # Check for an operator and transform to mongo-style if there is op = None if parts[-1] in MATCH_OPERATORS: op = parts.pop() negate = False if parts[-1] == 'not': parts.pop() negate = True if _doc_cls: # Switch field names to proper names [set in Field(name='foo')] try: fields = _doc_cls._lookup_field(parts) except Exception as e: raise InvalidQueryError(e) parts = [] cleaned_fields = [] for field in fields: append_field = True if isinstance(field, str): parts.append(field) append_field = False else: parts.append(field.db_field) if append_field: cleaned_fields.append(field) # Convert value to proper value field = cleaned_fields[-1] singular_ops = [None, 'ne', 'gt', 'gte', 'lt', 'lte', 'not'] singular_ops += STRING_OPERATORS if op in singular_ops: if isinstance(field, str): if (op in STRING_OPERATORS and isinstance(value, str)): StringField = _import_class('StringField') value = StringField.prepare_query_value(op, value) else: value = field else: value = field.prepare_query_value(op, value) elif op in ('in', 'nin', 'all', 'near') and not isinstance(value, dict): # 'in', 'nin' and 'all' require a list of values value = [field.prepare_query_value(op, v) for v in value] # if op and op not in COMPARISON_OPERATORS: if op: if op in GEO_OPERATORS: value = _geo_operator(field, op, value) elif op in CUSTOM_OPERATORS: if op == 'match': value = {"$elemMatch": value} else: NotImplementedError("Custom method '%s' has not " "been implemented" % op) elif op not in STRING_OPERATORS: value = {'$' + op: value} if negate: value = {'$not': value} for i, part in indices: parts.insert(i, part) key = '.'.join(parts) if op is None or key not in mongo_query: mongo_query[key] = value elif key in mongo_query: if key in mongo_query and isinstance(mongo_query[key], dict): mongo_query[key].update(value) # $maxDistance needs to come last - convert to SON if '$maxDistance' in mongo_query[key]: value_dict = mongo_query[key] value_son = SON() for k, v in value_dict.items(): if k == '$maxDistance': continue value_son[k] = v value_son['$maxDistance'] = value_dict['$maxDistance'] mongo_query[key] = value_son else: # Store for manually merging later merge_query[key].append(value) # The queryset has been filter in such a way we must manually merge for k, v in list(merge_query.items()): merge_query[k].append(mongo_query[k]) del mongo_query[k] if isinstance(v, list): value = [{k: val} for val in v] if '$and' in list(mongo_query.keys()): mongo_query['$and'].append(value) else: mongo_query['$and'] = value return mongo_query
def count_cmd(self, select=None, take=None, skip=None): cmd = SON([('count', self._mongo_collection.name)]) cmd.update({'query': select}) if select else None cmd.update({'limit': take}) if take else None cmd.update({'skip': skip}) if skip else None return self.db_command(cmd)['n']
] result = db.my.bulk_write(requests) bulk() # inventory.insert_one( # {"item": "canvas", # "qty": 100, # "tags": ["cotton"], # "size": {"h": 28, "w": 35.5, "uom": "cm"}}) # inventory.insert_many([ # {"item": "journal", # "qty": 25, # "tags": ["blank", "red"], # "size": {"h": 14, "w": 21, "uom": "cm"}}, # {"item": "mat", # "qty": 85, # "tags": ["gray"], # "size": {"h": 27.9, "w": 35.5, "uom": "cm"}}, # {"item": "mousepad", # "qty": 25, # "tags": ["gel", "blue"], # "size": {"h": 19, "w": 22.85, "uom": "cm"}}]) cursor = inventory.find({"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}) inventory.find_one({"size.uom": "in"}) output(inventory.find({"size.uom": "cm"}))
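The `SON` in the `find` above matters because an exact subdocument match is field-order sensitive; matching nested fields with dot notation sidesteps that (reusing the `inventory` collection from the snippet above):

# Exact match: only documents whose size is exactly
# {"h": 14, "w": 21, "uom": "cm"}, in that key order, will match.
inventory.find({"size": SON([("h", 14), ("w", 21), ("uom", "cm")])})

# Dot notation matches individual nested fields regardless of key order.
inventory.find({"size.h": 14, "size.w": 21, "size.uom": "cm"})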
def find_cmd(self, select=None, project=None, sort=None, take=None, skip=None, tailable=False, reduce_by=None): cmd = SON([('find', self._mongo_collection.name)]) cmd.update({'filter': select}) if select else None cmd.update({'projection': project}) if project else None cmd.update({'sort': sort}) if sort else None cmd.update({'skip': skip}) if skip else None cmd.update({'limit': take}) if take else None cmd.update({'tailable': tailable}) if tailable else None cmd.update({'singleBatch': True}) cmd.update({'batchSize': 1000}) docs = self.db_command(cmd)['cursor']['firstBatch'] if hasattr(docs, '__iter__'): return [d if not reduce_by else reduce_by(**d) for d in docs] # for d in docs: # yield d if not reduce_by else reduce_by().objectify(d) else: return docs if not reduce_by else reduce_by(**docs)
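For comparison, the same `find` command document can be sent through pymongo's public command interface; a hedged sketch with made-up collection and field names:

from bson.son import SON
from pymongo import MongoClient

db = MongoClient()["demo"]
cmd = SON([("find", "users")])  # command name must stay first
cmd["filter"] = {"active": True}
cmd["sort"] = SON([("age", -1)])
cmd["limit"] = 5
cmd["singleBatch"] = True
reply = db.command(cmd)
for doc in reply["cursor"]["firstBatch"]:
    print(doc)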
def __init__(self, _hsid=0, _hflags=0): SON.__init__(self) self.hdr = Hdr(99, _hsid, 0, _hflags) self.type = 99 self['t'] = 99
model1 = SON([ #global ('color_space' , 'rgb'), ('conv_mode' , 'valid'), # prepare images before processing ('preproc', SON([ # resize input images by keeping aspect ratio and fix the biggest edge ('max_edge' , 150), # kernel size of the box low pass filter ('lsum_ksize' , 3), # how to resize the image ('resize_method' , 'bicubic'), ])), # - input local normalization # local zero-mean, unit-magnitude ('normin', SON([ # kernel shape of the local normalization ('kshape' , (3,3)), # magnitude threshold # if the vector's length is below, it doesn't get resized ('threshold' , 1.0), ])), # - linear filtering ('filter', SON([ ('model_name','gridded_gabor'), # kernel shape of the gabors ('kshape' , [43,43]), # list of orientations ('norients' , 16), # list of frequencies ('divfreqs' , [2, 3, 4, 6, 11, 18]), # list of phases ('phases' , [0]), ])), # - simple non-linear activation ('activ', SON([ # minimum output (clamp) ('minout' , 0), # maximum output (clamp) ('maxout' , 1), ])), # - output local normalization ('normout', SON([ # kernel shape of the local normalization ('kshape', (3,3)), # magnitude threshold # if the vector's length is below, it doesn't get resized ('threshold', 1.0), ])), # - pooling ('pool' , SON([ # kernel size of the local sum (2d slice) ('lsum_ksize' , 17 ), # fixed output shape (only the first 2 dimensions, y and x) ('outshape' , (30,30)), ])), # -- featsel details what features you want to be included in the vector ('featsel' , SON([ # Include representation output ? True or False ('output' , True), # Include grayscale values ? None or (height, width) ('input_gray' , None), # Include color histograms ? None or nbins per color ('input_colorhists' , None), # Include input norm histograms ? None or (division, nfeatures) ('normin_hists' , None), # Include filter output histograms ? None or (division, nfeatures) ('filter_hists' , None), # Include activation output histograms ? None or (division, nfeatures) ('activ_hists' , None), # Include output norm histograms ? None or (division, nfeatures) ('normout_hists' , None), # Include representation output histograms ? None or (division, nfeatures) ('pool_hists' , None), ])) ])
def __init__(self, bytes, _hsid=0, _hflags=0): SON.__init__(self) self.hdr = Hdr(2, _hsid, 0, _hflags) self.type = 2 self['t'] = 2 self['bytes'] = bytes
base_model = SON([ #global ('color_space' , 'rgb'), ('conv_mode' , 'valid'), # prepare images before processing ('preproc', SON([ # resize input images by keeping aspect ratio and fix the biggest edge ('max_edge' , None), # kernel size of the box low pass filter ('lsum_ksize' , None), # how to resize the image ('resize_method' , None), ('whiten', False) ])), # - input local normalization # local zero-mean, unit-magnitude ('normin', SON([ # kernel shape of the local normalization ('kshape' , (3,3)), # magnitude threshold # if the vector's length is below, it doesn't get resized ('threshold' , 1.0), ])), # - linear filtering ('filter', SON([ ('model_name','cairo_generated'), ('kshape' , [32,32]), # list of orientations ('specs',[SON([ ('width',64), ('height',64), ('object' , cairo_objects.SQUARE), ('pattern' , cairo_objects.SOLID_RED), ]),SON([ ('width',64), ('height',64), ('object' , cairo_objects.SQUARE), ('pattern' , cairo_objects.SOLID_RED), ('sx',.5), ('sy',2) ])] , ) ])), # - simple non-linear activation ('activ', SON([ # minimum output (clamp) # maximum output (clamp) ('maxout' , 1), ])), # - output local normalization ('normout', SON([ # kernel shape of the local normalization ('kshape', (3,3)), # magnitude threshold # if the vector's length is below, it doesn't get resized ('threshold', 1.0), ])), ])
def command(sock, dbname, spec, slave_ok, is_mongos, read_preference, codec_options, session, client, check=True, allowable_errors=None, address=None, check_keys=False, listeners=None, max_bson_size=None, read_concern=None, parse_write_concern_error=False, collation=None, retryable_write=False): """Execute a command over the socket, or raise socket.error. :Parameters: - `sock`: a raw socket instance - `dbname`: name of the database on which to run the command - `spec`: a command document as a dict, SON, or mapping object - `slave_ok`: whether to set the SlaveOkay wire protocol bit - `is_mongos`: are we connected to a mongos? - `read_preference`: a read preference - `codec_options`: a CodecOptions instance - `session`: optional ClientSession instance. - `client`: optional MongoClient instance for updating $clusterTime. - `check`: raise OperationFailure if there are errors - `allowable_errors`: errors to ignore if `check` is True - `address`: the (host, port) of `sock` - `check_keys`: if True, check `spec` for invalid keys - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners` - `max_bson_size`: The maximum encoded bson size for this server - `read_concern`: The read concern for this command. - `parse_write_concern_error`: Whether to parse the ``writeConcernError`` field in the command response. - `collation`: The collation for this command. - `retryable_write`: True if this command is a retryable write. """ name = next(iter(spec)) ns = dbname + '.$cmd' flags = 4 if slave_ok else 0 if (client or session) and not isinstance(spec, ORDERED_TYPES): # Ensure command name remains in first place. spec = SON(spec) if session: spec['lsid'] = session._use_lsid() if retryable_write: spec['txnNumber'] = session._transaction_id() if client: client._send_cluster_time(spec, session) # Publish the original command document, perhaps with lsid and $clusterTime. 
orig = spec if is_mongos: spec = message._maybe_add_read_preference(spec, read_preference) if read_concern: if read_concern.level: spec['readConcern'] = read_concern.document if (session and session.options.causal_consistency and session.operation_time is not None): spec.setdefault( 'readConcern', {})['afterClusterTime'] = session.operation_time if collation is not None: spec['collation'] = collation publish = listeners is not None and listeners.enabled_for_commands if publish: start = datetime.datetime.now() request_id, msg, size = message.query(flags, ns, 0, -1, spec, None, codec_options, check_keys) if (max_bson_size is not None and size > max_bson_size + message._COMMAND_OVERHEAD): message._raise_document_too_large( name, size, max_bson_size + message._COMMAND_OVERHEAD) if publish: encoding_duration = datetime.datetime.now() - start listeners.publish_command_start(orig, dbname, request_id, address) start = datetime.datetime.now() try: sock.sendall(msg) reply = receive_message(sock, request_id) unpacked_docs = reply.unpack_response(codec_options=codec_options) response_doc = unpacked_docs[0] if client: client._receive_cluster_time(response_doc, session) if check: helpers._check_command_response( response_doc, None, allowable_errors, parse_write_concern_error=parse_write_concern_error) except Exception as exc: if publish: duration = (datetime.datetime.now() - start) + encoding_duration if isinstance(exc, (NotMasterError, OperationFailure)): failure = exc.details else: failure = message._convert_exception(exc) listeners.publish_command_failure( duration, failure, name, request_id, address) raise if publish: duration = (datetime.datetime.now() - start) + encoding_duration listeners.publish_command_success( duration, response_doc, name, request_id, address) return response_doc