Code example #1
    def copy(self, srcdst: dict):
        """
        Copy the files in ``srcdst``.

        This method only copies individual files. It does not recursively copy
        directories.

        All destination names must be unique. If they are not then this method
        will return immediately with an error.

        :param dict[src:dst] srcdst: src/dst pairs.
        :return: int num_copied
        """
        # Verify that all targets are unique.
        dst = list(srcdst.values())
        if sorted(dst) != sorted(list(set(dst))):
            return RetVal(False, 'Not all targets are unique', None)

        # Copy each file from src to dst.
        num_copied = 0
        for src, dst in srcdst.items():
            try:
                ret = self.get([src])
                assert ret.ok and src in ret.data
                assert self.put({dst: ret.data[src]}).ok
                num_copied += 1
            except AssertionError:
                continue
        return RetVal(True, None, num_copied)
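
A minimal usage sketch for ``copy``, assuming ``store`` is an already-constructed instance of the file-store class this method belongs to (the variable name and file names are hypothetical):

    # 'store' is a hypothetical, already-initialized instance of this class.
    ret = store.copy({'src_a.txt': 'dst_a.txt', 'src_b.txt': 'dst_b.txt'})
    if ret.ok:
        print('Copied {} file(s)'.format(ret.data))

    # Duplicate destinations are rejected before anything is copied.
    assert store.copy({'a.txt': 'same.txt', 'b.txt': 'same.txt'}).ok is False
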
Code example #2
    def removeConstraints(self, constraints: (tuple, list)):
        """
        Delete the ``constraints`` in the data base.

        It is safe to call this method on non-existing constraints (it will
        simply skip them).

        .. note:: this will *not* update the local cache. Call
           ``updateLocalCache`` to do so.

        :param list constraints: list of `ConstraintMeta` tuples.
        :return: number of deleted entries.
        """
        # Compile and sanity-check the input.
        constraints = [ConstraintMeta(*_) for _ in constraints]

        # Return immediately if the list of constraints to add is empty.
        if len(constraints) == 0:
            return RetVal(True, None, 0)

        # Compile the datastore operation for each constraint to remove.
        ops = []
        for con in constraints:
            # See 'addConstraints' method for an explanation of the colon
            # separated compound key.
            key = ':'.join([con.aid, con.contype, con.rb_a, con.rb_b])
            ops.append(key)
        ret = self.db.remove(ops)

        # Return the number of deleted constraints.
        return RetVal(True, None, ret.data)
Code example #3
    def put(self, ops: dict):
        """
        See docu in ``DatastoreBase``.
        """
        # Sanity check all arguments.
        if _checkPut(ops) is False:
            self.logit.warning('Invalid PUT argument')
            return RetVal(False, 'Argument error', None)

        ret = {}
        for aid, op in ops.items():
            # Unpack the data and make a genuine copy of it (just to avoid bad
            # surprises because dictionaries are mutable).
            data = op['data']
            data = copy.deepcopy(data)

            # Insert the AID field. This field is the primary key in the
            # Datastore.
            data['aid'] = aid

            # Insert the document only if it does not yet exist.
            r = self.db.update_one({'aid': aid}, {'$setOnInsert': data},
                                   upsert=True)

            # Specify the success value for this document depending on whether
            # it already existed in the database or not.
            if r.upserted_id is None:
                ret[aid] = False
            else:
                ret[aid] = True
        return RetVal(True, None, ret)
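
Each ``ops`` entry maps an AID to a dict with a 'data' payload, and the returned dict maps each AID to True if the document was newly inserted and to False if it already existed. A minimal sketch, assuming ``ds`` is a connected datastore instance (hypothetical name and field values):

    ops = {
        'obj_1': {'data': {'position': [0, 0, 0]}},
        'obj_2': {'data': {'position': [1, 2, 3]}},
    }
    ret = ds.put(ops)  # 'ds' is a hypothetical datastore handle
    assert ret.ok
    # ret.data is e.g. {'obj_1': True, 'obj_2': False} if 'obj_2' already existed.
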
Code example #4
def addCmdBoosterForce(objID: str, force: list, torque: list):
    """
    Orient ``torque`` and ``force`` according to the orientation of ``objID``
    and then apply them to the object.

    The only difference between this command and ``addCmdDirectForce`` is that
    the ``force`` and ``torque`` vector are specified in the object coordinate
    system and Leonard will rotate them to world coordinates before actually
    applying the force.

    Other services, most notably Leonard, will periodically check for new
    announcements and incorporate them into the simulation as necessary.

    :param str objID: the object
    :param list force: apply this central ``force`` to ``objID``.
    :param list torque: apply this ``torque`` to ``objID``.
    :return bool: Success
    """
    # Sanity check.
    if objID == '':
        msg = 'Invalid Object ID'
        logit.warning(msg)
        return RetVal(False, msg, None)
    if not (len(force) == len(torque) == 3):
        return RetVal(False, 'force or torque has invalid length', None)

    # Compile datastore ops.
    db = datastore.getDSHandle('Commands')
    data = {'force': force, 'torque': torque}
    key = 'booster_force:{}'.format(objID)
    ops = {key: {'data': data}}
    db.put(ops)

    return RetVal(True, None, None)
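
A minimal call sketch; the object ID and vectors are arbitrary illustrations:

    # Push the (hypothetical) object 'obj_1' along its local z-axis.
    ret = addCmdBoosterForce('obj_1', force=[0, 0, 1], torque=[0, 0, 0])
    assert ret.ok
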
Code example #5
def getUniqueObjectIDs(numIDs: int):
    """
    Return a list of ``numIDs`` unique strings.

    :param int numIDs: non-negative integer.
    :return list[str]: for instance ['1', '2']
    """
    # Sanity check.
    if numIDs < 0:
        return RetVal(False, 'numIDs must be non-negative', None)

    # Increment the counter by ``numIDs``.
    db = dbHandles['Counters']
    ret = db.incrementCounter('objcnt', numIDs)
    if not ret.ok:
        return ret
    value = ret.data

    # Create a range of values with length ``numIDs`` (a single value if
    # ``numIDs`` is zero).
    if numIDs == 0:
        newIDs = range(1)
    else:
        newIDs = range(numIDs)

    # Convert [0, 1, ..., N] to [value, value-1, ..., value-N+1]. For instance,
    # if value=10 and N=3 then this will produce [10, 9, 8]
    newIDs = [value - ii for ii in newIDs]

    # Reverse the list and convert the integers to strings.
    newIDs = [str(_) for _ in newIDs[::-1]]
    return RetVal(True, None, newIDs)
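
A usage sketch based on the docstring above; it assumes the 'Counters' datastore handle has already been initialized:

    ret = getUniqueObjectIDs(3)
    if ret.ok:
        # ret.data is a list of three unique strings, e.g. ['8', '9', '10'].
        print(ret.data)
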
Code example #6
    def getMulti(self, aids: (list, tuple), prj=None):
        """
        See docu in ``DatastoreBase``.
        """
        # Sanity check all arguments.
        if _checkGet(aids, prj) is False:
            self.logit.warning('Invalid GETMULTI argument')
            return RetVal(False, 'Argument error', None)

        # Retrieve the requested documents.
        prj = self._compileProjectionOperator(prj)
        cursor = self.db.find({'aid': {'$in': aids}}, prj)

        # Compile all documents into a dictionary. The keys are the AIDs and
        # the values are the original documents with the AID field removed.
        docs = self._removeAID(cursor)

        # Compile the output dictionary and assign None for every requested
        # document. Then overwrite the None values with the ones we could
        # retrieve from the database. This will ensure the set of keys in the
        # output dictionary matches ``aids`` even if not all values were
        # available in the database.
        out = {_: None for _ in aids}
        out.update(docs)

        # Return the documents (or None for those we did not find).
        return RetVal(True, None, out)
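
The output dictionary always contains one entry per requested AID; documents that were not found map to None. A sketch, assuming ``ds`` is a connected datastore handle (hypothetical name):

    ret = ds.getMulti(['obj_1', 'does_not_exist'])
    assert ret.ok
    # ret.data == {'obj_1': {...}, 'does_not_exist': None}
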
Code example #7
def addCmdDirectForce(objID: str, force: list, torque: list):
    """
    Apply ``torque`` and central ``force`` to ``objID``.

    Other services, most notably Leonard, will periodically check for new
    announcements and incorporate them into the simulation as necessary.

    :param str objID: the object
    :param list force: apply this central ``force`` to ``objID``.
    :param list torque: apply this ``torque`` to ``objID``.
    :return bool: Success
    """
    # Sanity check.
    if objID == '':
        msg = 'Invalid Object ID'
        logit.warning(msg)
        return RetVal(False, msg, None)
    if not (len(force) == len(torque) == 3):
        return RetVal(False, 'force or torque has invalid length', None)

    # Compile datastore ops.
    db = datastore.getDSHandle('Commands')
    data = {'force': force, 'torque': torque}
    key = 'direct_force:{}'.format(objID)
    ops = {key: {'data': data}}
    db.put(ops)

    return RetVal(True, None, None)
Code example #8
    def applyForceAndTorque(self, bodyID, force, torque):
        """
        Apply a ``force`` and ``torque`` to the center of mass of ``bodyID``.

        :param int bodyID: the ID of the body to update
        :param 3-array force: force applied directly to center of mass
        :param 3-array torque: torque around center of mass.
        :return: Success
        """
        # Sanity check.
        if bodyID not in self.rigidBodies:
            msg = 'Cannot set force of unknown body <{}>'.format(bodyID)
            self.logit.warning(msg)
            return RetVal(False, msg, None)

        # Convenience.
        body = self.rigidBodies[bodyID]

        # Convert the force and torque to Vec3.
        b_force = Vec3(*force)
        b_torque = Vec3(*torque)

        # Clear pending forces (should be cleared automatically by Bullet when
        # it steps the simulation) and apply the new ones.
        body.clearForces()
        body.applyCentralForce(b_force)
        body.applyTorque(b_torque)
        return RetVal(True, None, None)
Code example #9
    def removeDirs(self, dirnames: (tuple, list)):
        """
        Recursively delete all directories specified in ``dirnames``.

        This function is the equivalent of `rm -rf path/*`. It always succeeds
        and returns the number of deleted files.

        :param list[str] dirnames: the directories to delete.
        :return: number of deleted files.
        """
        # Sanity check: all entries must be strings. If a name ends with a
        # slash ('/') then strip it to avoid ambiguous queries.
        try:
            for idx, name in enumerate(dirnames):
                assert isinstance(name, str)
                dirnames[idx] = name if name[-1] != '/' else name[:-1]
        except AssertionError:
            return RetVal(False, 'Invalid arguments', None)

        # Delete the directories.
        num_deleted = 0
        for dirname in dirnames:
            # Find all filenames that begin with 'dirname' and delete them.
            query = {'filename': {'$regex': '^{}/.*'.format(dirname)}}
            fnames = set()
            for doc in self.fs.find(query):
                fnames.add(doc.filename)
                self.fs.delete(doc._id)

            # Aggregate the total number of deleted files (we do not count
            # multiple versions of the same file).
            num_deleted += len(fnames)
        return RetVal(True, None, num_deleted)
Code example #10
    def get(self, fnames: (tuple, list)):
        """
        Return the files specified in ``fnames``.

        The return value is a dictionary with the entries of ``fnames`` as
        keys. For instance, if `fnames = ['foo.txt', 'bar.png']` then the
        returned dictionary will be `{'foo.txt': bytes, 'bar.png': bytes}`.

        If a file could not be found then the returned dictionary will miss the
        respective key. This does not constitute an error (ie the 'ok' flag is
        still set).

        The 'ok' flag is only False if an error in the underlying file system
        occurred.

        :param list[str] fnames: the file names to retrieve.
        :return: dict[file_name: file_content]
        """
        out = {}
        for fname in fnames:
            try:
                out[fname] = self.fs.get_last_version(fname).read()
            except gridfs.errors.NoFile as err:
                msg = 'GridFS URL <{}> not found'.format(fname)
                self.logit.info(msg)
            except gridfs.errors.CorruptGridFile:
                msg = 'Corrupt GridFS for URL <{}>'.format(fname)
                self.logit.error(msg)
                return RetVal(False, msg, None)
            except gridfs.errors.GridFSError:
                msg = 'Unknown GridFS error'
                self.logit.error(msg)
                return RetVal(False, msg, None)
        return RetVal(True, None, out)
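
Unlike ``getMulti``, files that were not found are simply absent from the result rather than mapping to None. A sketch, assuming ``store`` is an instance of this GridFS-backed class (hypothetical name and file names):

    ret = store.get(['foo.txt', 'missing.png'])
    assert ret.ok
    # Only the files that exist appear in the result, e.g.:
    # ret.data == {'foo.txt': b'...'}
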
Code example #11
    def getConstraints(self, bodyIDs: (set, tuple, list)):
        """
        Return all constraints that involve any of the bodies in ``bodyIDs``.

        Return all constraints if ``bodyIDs`` is *None*.

        .. note:: this method only consults the local cache. Depending on your
            circumstances you may want to call ``updateLocalCache`` first.

        :param list[int] bodyIDs: list of body IDs
        :return: list of ``ConstraintMeta`` instances.
        :rtype: tuple
        """
        if bodyIDs is None:
            return RetVal(True, None, tuple(self._cache.values()))

        # Reduce bodyIDs to a set. This should speed up lookups.
        bodyIDs = {_ for _ in bodyIDs if isinstance(_, str)}

        # Iterate over all constraints and pick the ones that contain at least
        # one of the bodies specified in `bodyIDs`.
        out = []
        for tmp in self._cache:
            if not (tmp.rb_a in bodyIDs or tmp.rb_b in bodyIDs):
                continue
            out.append(self._cache[tmp])
        return RetVal(True, None, tuple(out))
Code example #12
    def applyForce(self, bodyID: str, force, rel_pos):
        """
        Apply a ``force`` at ``rel_pos`` to ``bodyID``.

        :param str bodyID: the ID of the body to update
        :param 3-array force: force applied directly to center of mass
        :param 3-array rel_pos: position of force relative to center of mass
        :return: Success
        """
        # Sanity check.
        if bodyID not in self.rigidBodies:
            msg = 'Cannot set force of unknown body <{}>'.format(bodyID)
            return RetVal(False, msg, None)

        # Convenience.
        body = self.rigidBodies[bodyID]

        # Convert the force and torque to Vec3.
        b_force = Vec3(*force)
        b_relpos = Vec3(*rel_pos)

        # Clear pending forces (should be cleared automatically by Bullet when
        # it steps the simulation) and apply the new ones.
        body.clearForces()
        body.applyForce(b_force, b_relpos)
        return RetVal(True, None, None)
Code example #13
def getRegion(name: str, ofs: np.ndarray,
              regionDim: (np.ndarray, list, tuple)):
    """
    Return the grid values starting at 3D position ``ofs``.

    The returned array comprises four dimensions. The first three correspond
    to x/y/z position and the fourth contains the data. That data is itself a
    vector. The size of that vector was specified when the grid was created.
    The dimension of the returned region depends on ``regionDim`` and the
    ``vecDim`` of the grid. For instance, if regionDim=(1, 2, 3) and
    vecDim=4, then the shape of the returned NumPy array is (1, 2, 3, 4).

    :param str name: grid name.
    :param 3D-vector ofs: start position in grid from where to read values.
    :param 3D-vector regionDim: number of values to read in each dimension.
    :return: 4D matrix.
    """
    # Fetch the database handle.
    ret = getGridDB(name)
    if not ret.ok:
        return ret
    db, admin = ret.data
    gran, vecDim = admin['gran'], admin['vecDim']
    del admin, ret

    # Sanity check: ``ofs`` and ``regionDim`` must have 3 entries each.
    if (len(ofs) != 3) or (len(regionDim) != 3):
        return RetVal(False, 'Invalid parameter values', None)

    # Sanity check: ``regionDim`` must only contain positive integers.
    regionDim = np.array(regionDim, np.int64)
    if np.amin(regionDim) < 1:
        return RetVal(False, 'Dimensions must be positive', None)

    # Compute the grid index of ``ofs``.
    x0, y0, z0, strPos = _encodePosition(ofs, gran)

    # Convenience: the ``regionDim`` parameter uniquely specifies the number of
    # grid positions to query in each dimension.
    x1 = int(x0 + regionDim[0])
    y1 = int(y0 + regionDim[1])
    z1 = int(z0 + regionDim[2])

    # Query the values of all the specified grid positions.
    res = db.find({'x': {'$gte': x0, '$lt': x1},
                   'y': {'$gte': y0, '$lt': y1},
                   'z': {'$gte': z0, '$lt': z1}})

    # Populate the output data structure.
    out = np.zeros(np.hstack((regionDim, vecDim)), np.float64)
    for doc in res:
        # Convert the grid index to an array index, ie simply compute all grid
        # indices relative to the ``ofs`` position.
        x = int(doc['x'] - x0)
        y = int(doc['y'] - y0)
        z = int(doc['z'] - z0)
        out[x, y, z, :] = np.array(doc['val'], np.float64)

    return RetVal(True, None, out)
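
A sketch of the shape contract described in the docstring, assuming a grid named 'force' with vecDim=3 has already been defined (both are hypothetical here):

    import numpy as np

    ret = getRegion('force', ofs=np.array([0, 0, 0]), regionDim=(2, 2, 2))
    if ret.ok:
        # One 3-vector per grid position: shape is (x, y, z, vecDim).
        assert ret.data.shape == (2, 2, 2, 3)
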
Code example #14
def addCmdSpawn(objData: (tuple, list)):
    """
    Announce that the elements in ``objData`` were created.

    The ``objData`` variable comprises a list of (objID, body) tuples.

    Returns **False** if ``objID`` already exists, is already scheduled to
    spawn, or if any of the parameters are invalid.

    Other services, most notably Leonard, will periodically check for new
    announcements and incorporate them into the simulation as necessary.

    :param tuple[(str, _RigidBodyData)] objData: the new objects created in Azrael.
    :return: success.
    """
    # Sanity check all bodies.
    for objID, body in objData:
        try:
            assert isinstance(objID, str)
            assert isinstance(body, _RigidBodyData)
        except AssertionError:
            msg = '<addCmdSpawn> received invalid argument type'
            return RetVal(False, msg, None)

        if objID == '':
            msg = 'Invalid Object ID'
            logit.warning(msg)
            return RetVal(False, msg, None)

    # Compile the datastore ops.
    ops = {}
    for objID, body in objData:
        # Compile the AABBs. Return immediately if an error occurs.
        aabbs = computeAABBs(body.cshapes)
        if not aabbs.ok:
            return RetVal(False, 'Could not compile all AABBs', None)

        # Insert this document.
        data = {'rbs': body._asdict(), 'AABBs': aabbs.data}
        key = 'spawn:{}'.format(objID)
        ops[key] = {'data': data}

    # Store the spawn commands.
    db = datastore.getDSHandle('Commands')
    ret = db.put(ops)
    if not ret.ok:
        return ret

    # Notify the user if not all spawn commands could be written. This should
    # not happen because all object IDs must be unique. If this error occurs
    # then something is wrong with the atomic object count.
    if False in ret.data.values():
        msg = ('At least one spawn command for the same objID already '
               'exists --> serious bug')
        logit.error(msg)
        return RetVal(False, msg, None)
    else:
        # All objIDs were unique --> success.
        return RetVal(True, None, None)
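
A sketch of the expected call format. How a ``_RigidBodyData`` instance is constructed is project specific and not shown here:

    # 'body' must be a _RigidBodyData instance; its construction is omitted.
    objID = '1'
    ret = addCmdSpawn([(objID, body)])
    if not ret.ok:
        print('spawn command was rejected')
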
Code example #15
    def modify(self, ops: dict):
        """
        See docu in ``DatastoreBase``.
        """
        # Sanity check all arguments.
        if _checkMod(ops) is False:
            self.logit.warning('Invalid MODIFY argument')
            return RetVal(False, 'Argument error', None)

        # Iterate over all ops (one for each AID) and apply the requested
        # modifications.
        ret = {}
        for aid, op in ops.items():
            # Sanity checks. If any of the necessary keys do not exist then
            # no modification will be applied to the current AID.
            try:
                # Get the document from the database.
                c = self.content[aid]

                # Verify that all the keys specified in the 'exists' field
                # actually do exist.
                for key, yes in op['exists'].items():
                    assert self.hasKey(c, key) is yes

                # Traverse the nested dictionaries to the value that we are
                # supposed to increase. Triggers a KeyError if the keys do not
                # all exist.
                for key_hierarchy in op['inc']:
                    tmp = c
                    for key in key_hierarchy:
                        tmp = tmp[key]

                    # The value to increment must be a number.
                    assert isinstance(tmp, (float, int))

                # If we got this far we have established that the request is
                # (probably) reasonable.
                ret[aid] = True
            except (AssertionError, KeyError):
                # Skip this AID because something went wrong. A 'False' will be
                # returned for it later.
                ret[aid] = False
                continue

            # Increment the specified keys.
            for key, val in op['inc'].items():
                self.incKey(self.content[aid], key, val)

            # Delete the specified keys.
            for key in op['unset']:
                self.delKey(self.content[aid], key)

            # Create/overwrite the specified keys/value pairs.
            for key, val in op['set'].items():
                self.setKey(self.content[aid], key, val)

        # Return the success status (True or False) for each AID.
        return RetVal(True, None, ret)
Code example #16
    def setGravity(self, gravity: (tuple, list)):
        """
        Set the ``gravity`` in the simulation.
        """
        try:
            gravity = np.array(gravity, np.float64)
            assert gravity.ndim == 1
            assert len(gravity) == 3
        except (TypeError, ValueError, AssertionError):
            return RetVal(False, 'Invalid type', None)
        self.dynamicsWorld.setGravity(Vec3(*gravity))
        return RetVal(True, None, None)
Code example #17
    def remove(self, aids: (tuple, list)):
        """
        See docu in ``DatastoreBase``.
        """
        # Sanity check all arguments.
        if _checkRemove(aids) is False:
            self.logit.warning('Invalid REMOVE argument')
            return RetVal(False, 'Argument error', None)

        # Delete the specified AIDs and return the number of actually deleted
        # documents.
        ret = self.db.delete_many({'aid': {'$in': aids}})
        return RetVal(True, None, ret.deleted_count)
Code example #18
    def modify(self, ops: dict):
        """
        See docu in ``DatastoreBase``.
        """
        # Sanity check all arguments.
        if _checkMod(ops) is False:
            self.logit.warning('Invalid MOD argument')
            return RetVal(False, 'Argument error', None)

        # Issue the operations one-by-one.
        ret = {}
        for aid, op_tmp in ops.items():
            # Compile the first part of the query that specifies which (nested)
            # keys must exist.
            query = {
                '.'.join(key): {
                    '$exists': yes
                }
                for key, yes in op_tmp['exists'].items()
            }

            # Add the AID to the query.
            query['aid'] = aid

            # Compile the update operations.
            op = {
                '$inc':
                {'.'.join(key): val
                 for key, val in op_tmp['inc'].items()},
                '$set':
                {'.'.join(key): val
                 for key, val in op_tmp['set'].items()},
                '$unset': {'.'.join(key): True
                           for key in op_tmp['unset']},
            }

            # Prune the update operations (Mongo complains if they are empty).
            op = {k: v for k, v in op.items() if len(v) > 0}

            # If no updates are necessary then skip this object.
            if len(op) == 0:
                continue

            # Issue the database query.
            r = self.db.update_one(query, op, upsert=False)

            # The update was a success if Mongo could find a document that
            # matched our query. Since AID has a unique index it is impossible
            # to match more than one.
            ret[aid] = (r.matched_count == 1)
        return RetVal(True, None, ret)
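
From the compilation above, each op carries 'exists', 'inc', 'set', and 'unset' fields whose keys are hierarchies of nested key names (e.g. tuples of strings). A sketch of a single op, assuming ``ds`` is a connected datastore handle and the field names are purely illustrative:

    ops = {
        'obj_1': {
            # Apply the op only if 'template.name' exists and 'deleted' does not.
            'exists': {('template', 'name'): True, ('deleted',): False},
            # Increment a nested counter, overwrite one value, drop another key.
            'inc': {('stats', 'hits'): 1},
            'set': {('template', 'name'): 'cube'},
            'unset': [('tmp',)],
        }
    }
    ret = ds.modify(ops)
    # ret.data == {'obj_1': True} if the document matched, else {'obj_1': False}.
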
Code example #19
def setRegion(name: str, ofs: np.ndarray, gridValues: np.ndarray):
    """
    Update the grid values starting at ``ofs`` with ``gridValues``.

    :param str name: grid name.
    :param 3D-vector ofs: the values are inserted relative to this ``ofs``.
    :param 4D-vector gridValues: the data values to set.
    :return: Success
    """
    # Fetch the database handle.
    ret = getGridDB(name)
    if not ret.ok:
        return ret
    db, admin = ret.data
    gran, vecDim = admin['gran'], admin['vecDim']
    del admin, ret

    # Sanity check: ``ofs`` must denote a position in 3D space.
    if len(ofs) != 3:
        return RetVal(False, 'Invalid parameter values', None)

    # Sanity check: ``gridValues`` must be a 3D grid where every entry is a
    # vector with ``vecDim`` elements.
    if (len(gridValues.shape) != 4) or (gridValues.shape[3] != vecDim):
        return RetVal(False, 'Invalid gridValues dimension', None)

    # Compile the bulk database operations.
    bulk = db.initialize_unordered_bulk_op()
    for x in range(gridValues.shape[0]):
        for y in range(gridValues.shape[1]):
            for z in range(gridValues.shape[2]):
                # Convenience.
                val = gridValues[x, y, z, :]

                # Compute the grid position of the current data value and
                # convert it to integer indexes.
                pos = ofs + np.array([x, y, z])
                px, py, pz, strPos = _encodePosition(pos, gran)

                # Get the database query and entry.
                query, data = _encodeData(px, py, pz, strPos, val.tolist())

                if np.sum(np.abs(val)) < 1E-9:
                    bulk.find(query).remove()
                else:
                    bulk.find(query).upsert().update({'$set': data})

    # Execute the Mongo query. Don't bother with the return value.
    bulk.execute()
    return RetVal(True, None, None)
Code example #20
def addCmdModifyBodyState(objID: str, body: dict):
    """
    Queue request to override the Body State of ``objID`` with ``body``.

    Other services, most notably Leonard, will periodically check for new
    announcements and incorporate them into the simulation as necessary.

    :param str objID: object to update.
    :param dict body: new object attributes.
    :return bool: Success
    """
    # Sanity check.
    if objID == '':
        msg = 'Invalid Object ID'
        logit.warning(msg)
        return RetVal(False, msg, None)

    # Make sure that ``body`` is really valid by constructing a new
    # DefaultRigidBody from it.
    body_sane = aztypes.DefaultRigidBody(**body)
    if body_sane is None:
        return RetVal(False, 'Invalid override data', None)

    # Recompute the AABBs if new collision shapes were provided.
    aabbs = None
    if 'cshapes' in body:
        if body_sane.cshapes is not None:
            ret = computeAABBs(body_sane.cshapes)
            if ret.ok:
                aabbs = ret.data

    # Build the original 'body' but from the sanitised version - just to be
    # sure.
    body = {k: v for (k, v) in body_sane._asdict().items() if k in body}
    del body_sane

    # Add the new body state and AABBs to the 'command' database from where
    # clients can read it at their leisure. Note that this will overwrite
    # already pending update commands for the same object - tough luck.
    db = datastore.getDSHandle('Commands')

    data = {'rbs': body, 'AABBs': aabbs}
    key = 'modify:{}'.format(objID)
    ops = {key: {'data': data}}
    db.put(ops)

    # This function was successful if exactly one document was updated.
    return RetVal(True, None, None)
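
A sketch of a partial state override. Only the keys actually present in the dict are overridden; 'position' is merely assumed here to be a valid ``DefaultRigidBody`` attribute:

    # 'position' is an assumed DefaultRigidBody field, used purely as an example.
    ret = addCmdModifyBodyState('obj_1', {'position': [0, 1, 2]})
    assert ret.ok
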
Code example #21
def setValues(name: str, posVals: (tuple, list)):
    """
    Update the grid values as specified in ``posVals``.

    :param list posVals: list of (pos, val) tuples.
    :return: Success
    """
    # Return immediately if we did not get any values.
    if len(posVals) == 0:
        return RetVal(False, '<setValues> received no arguments', None)

    # Fetch the database handle.
    ret = getGridDB(name)
    if not ret.ok:
        return ret
    db, admin = ret.data
    gran, vecDim = admin['gran'], admin['vecDim']
    del admin, ret

    # Sanity check each (pos, val) entry and compile the bulk operations.
    bulk = db.initialize_unordered_bulk_op()
    try:
        for pv in posVals:
            assert isinstance(pv, (tuple, list, np.ndarray))
            assert len(pv) == 2
            assert isinstance(pv[0], (tuple, np.ndarray))
            assert isinstance(pv[1], (tuple, np.ndarray))
            pos, val = pv
            assert len(pos) == 3
            assert len(val) == vecDim

            # Convert the position to grid indexes.
            px, py, pz, strPos = _encodePosition(pos, gran)

            # Get the database query and entry.
            query, data = _encodeData(px, py, pz, strPos, val.tolist())

            # Update the value in the DB, unless it is essentially zero, in
            # which case remove it to free up space.
            if np.sum(np.abs(val)) < 1E-9:
                bulk.find(query).remove()
            else:
                bulk.find(query).upsert().update({'$set': data})
    except AssertionError:
        return RetVal(False, '<setValues> received invalid arguments', None)

    bulk.execute()
    return RetVal(True, None, None)
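
A sketch of the expected ``posVals`` format, assuming a grid named 'force' with vecDim=3 (hypothetical). The validation above calls ``val.tolist()``, so NumPy arrays are the safe choice for the value part of each tuple:

    import numpy as np

    posVals = [
        (np.array([0, 0, 0]), np.array([1.0, 0.0, 0.0])),
        (np.array([0, 0, 1]), np.array([0.0, 0.0, 0.0])),  # ~zero: entry removed
    ]
    ret = setValues('force', posVals)
    assert ret.ok
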
Code example #22
    def removeCounter(self, counter_name: str):
        """
        See docu in ``DatastoreBase``.
        """
        # Delete the specified counter.
        self.db.delete_one({'aid': counter_name})
        return RetVal(True, None, None)
Code example #23
def getAllGridNames():
    """
    Return the names of all currently defined grids.

    :return: grid names.
    :rtype: tuple of strings.
    """
    if _DB_Grid is None:
        return RetVal(False, 'Not initialised', None)
    else:
        # Every grid sits in its own collection. The grid names are hence the
        # collection names, except for 'system.indexes' which Mongo creates
        # internally.
        names = set(_DB_Grid.collection_names())
        names.discard('system.indexes')
        return RetVal(True, None, tuple(names))
Code example #24
    def reset(self):
        """
        See docu in ``DatastoreBase``.
        """
        self.content = {}
        self.counters = {}
        return RetVal(True, None, None)
Code example #25
    def reset(self):
        """
        See docu in ``DatastoreBase``.
        """
        # Make several attempts to connect to the database and flush it.
        for ii in range(10):
            try:
                # Drop the collection. Then create a unique index on AID.
                self.db.drop()
                self.db.ensure_index([('aid', pymongo.ASCENDING)],
                                     background=False,
                                     unique=True)
                break
            except pymongo.errors.AutoReconnect as err:
                # An error occurred. According to the pymongo docu this means
                # we need to retry.
                time.sleep(0.2)
                self.connect()

                # Too many errors have occurred.
                if ii >= 8:
                    raise err

        # All good.
        return RetVal(True, None, None)
Code example #26
    def test_connect_when_setupRabbitMQ_does_not_raise(self, m_setupRabbitMQ):
        """
        Handle the return values of setupRabbitMQ correctly when it does not
        raise any exceptions.
        """
        # Constructor must set 'rmq' to None and not connect to RabbitMQ.
        es = eventstore.EventStore(topics=['#'])
        assert es.rmq is None
        assert m_setupRabbitMQ.call_count == 0

        # If the 'rmq' is not None then setupRabbitMQ must not be called.
        es.rmq = {'x': 'y'}
        assert m_setupRabbitMQ.call_count == 0
        assert es.connect() == (True, None, None)
        assert m_setupRabbitMQ.call_count == 0
        assert es.rmq == {'x': 'y'}

        # If the 'rmq' is None then setupRabbitMQ must be called and its return
        # value stored in the 'rmq' instance variable.
        es.rmq = None
        m_setupRabbitMQ.reset_mock()
        m_setupRabbitMQ.return_value = RetVal(True, None, {'foo': 'bar'})
        assert m_setupRabbitMQ.call_count == 0
        assert es.connect() == (True, None, None)
        assert m_setupRabbitMQ.call_count == 1
        assert es.rmq == {'foo': 'bar'}
Code example #27
    def remove(self, fnames: (tuple, list)):
        """
        Remove all files specified in ``fnames``.

        :param list[str] fnames: the file names to delete.
        :return: number of deleted files.
        """
        # Iterate over each file and delete it.
        num_deleted = 0
        for fname in fnames:
            try:
                # File names are unique. However, multiple versions of the same
                # file may exist (an implicit GridFS feature).
                found_at_least_one = False
                for mid in self.fs.find({'filename': fname}):
                    self.fs.delete(mid._id)
                    found_at_least_one = True

                # Increment the counter by at most one, no matter how many
                # versions we found.
                if found_at_least_one:
                    num_deleted += 1
            except gridfs.errors.NoFile:
                pass
            except gridfs.errors.GridFSError:
                # All other GridFS errors.
                pass
        return RetVal(True, None, num_deleted)
Code example #28
    def getAll(self, prj=None):
        """
        See docu in ``DatastoreBase``.
        """
        # Sanity check all arguments.
        if _checkGetAll(prj) is False:
            self.logit.warning('Invalid GETALL argument')
            return RetVal(False, 'Argument error', None)

        # Copy the requested documents into an output dictionary. If a key does
        # not exist then the output dictionary will contain None for the
        # respective values.
        docs = copy.deepcopy(self.content)
        if prj is not None:
            for doc in docs:
                docs[doc] = self.project(docs[doc], prj)

        return RetVal(True, None, docs)
Code example #29
    def removeCounter(self, counter_name: str):
        """
        See docu in ``DatastoreBase``.
        """
        try:
            del self.counters[counter_name]
        except KeyError:
            pass
        return RetVal(True, None, None)
Code example #30
    def reset(self):
        """
        Flush the constraint database.

        :return: success
        """
        self.db.reset()
        self._cache = {}
        return RetVal(True, None, None)
Code example #31
File: web.py  Project: olitheolix/azrael
    def returnToClient(self, ret: RetVal):
        """
        Send ``ret`` to the Client via the Websocket.

        This is a convenience method to enhance readability.

        :param RetVal ret: the payload to send back to the client.
        :return: None
        """
        # Convert the message to JSON.
        try:
            ret = json.dumps(ret._asdict())
        except (ValueError, TypeError):
            msg = 'Could not convert Clerk return value to JSON'
            ret = json.dumps(RetVal(False, msg, None)._asdict())

        # Send the message via the Websocket.
        self.write_message(ret, binary=False)