Example 1
def request_push(push_url: str,
                 extra_data=None,
                 encode=None,
                 decode=None,
                 headers=None,
                 timeout=None,
                 extensions=True,
                 session=None) -> Dict[str, Any]:
    message = PushMessage()
    message.latest_version_id = core.get_latest_version_id(session=session)
    compress(session=session)
    message.add_unversioned_operations(session=session,
                                       include_extensions=extensions)

    if not message.operations:
        return {}

    message.set_node(session.query(Node).order_by(Node.node_id.desc()).first())

    data = message.to_json()
    data.update({'extra_data': extra_data or {}})

    code, reason, response = post_request(push_url, data, encode, decode,
                                          headers, timeout)

    if (code // 100 != 2) or response is None:
        # suggests_pull is assumed to be a module-level hook (a callable or
        # None) that decides whether this failure means the client should
        # pull first
        if suggests_pull is not None and suggests_pull(code, reason, response):
            raise PullSuggested(code, reason, response)
        raise PushRejected(code, reason, response)
    new_version_id = response.get('new_version_id')
    if new_version_id is None:
        raise PushRejected(
            code, reason, {
                'error': "server didn't respond with new version id",
                'response': response
            })
    # Who should set the dates? Maybe the server should send back a complete
    # Version. For now the field is ignored, so it doesn't matter.
    session.add(
        Version(version_id=new_version_id, created=datetime.datetime.now()))
    for op in message.operations:
        op.version_id = new_version_id
    # return the response for the programmer to do what she wants
    # afterwards
    return response
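
A minimal usage sketch for request_push. Session (an application sessionmaker), the URL, and the commit/rollback policy are assumptions; request_push, PushRejected and PullSuggested are the names defined above.

# Sketch only: Session and the URL are placeholders for the application's own.
session = Session()
try:
    response = request_push("https://server.example/push", session=session)
    if response:
        session.commit()  # persist the new Version and re-versioned operations
except PullSuggested:
    session.rollback()  # server is ahead: run a pull first, then retry the push
except PushRejected:
    session.rollback()
    raise
finally:
    session.close()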
Example 2
def repair_database(message, latest_version_id, session=None):
    if not isinstance(message, BaseMessage):
        raise TypeError("need an instance of dbsync.messages.base.BaseMessage "
                        "to perform the repair operation")
    # clear local database
    for model in core.synched_models.models:
        session.query(model).delete(synchronize_session=False)
    # clear the local operations and versions
    session.query(Operation).delete(synchronize_session=False)
    session.query(Version).delete(synchronize_session=False)
    session.expire_all()
    # load the fetched database
    obj_count = 0
    batch_size = 500
    for modelkey in core.synched_models.model_names:
        for obj in message.query(modelkey):
            session.add(obj)
            obj_count += 1
            if obj_count % batch_size == 0:
                session.flush()
    # load the new version, if any
    if latest_version_id is not None:
        session.add(Version(version_id=latest_version_id))
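
A sketch of how a repair could be driven. fetch_server_database() is a hypothetical helper standing in for whatever transport downloads the full snapshot; Session is assumed to be the application's sessionmaker.

# Sketch only: fetch_server_database() is hypothetical; it stands in for the
# call that returns a full BaseMessage snapshot plus the server's version id.
session = Session()
try:
    message, latest_version_id = fetch_server_database()
    repair_database(message, latest_version_id, session=session)
    session.commit()  # the local database now mirrors the server snapshot
finally:
    session.close()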
Example 3
    def listener(mapper, connection, target, session=None) -> None:
        logger.info(f"tracking {target}")
        if getattr(core.SessionClass.object_session(target),
                   core.INTERNAL_SESSION_ATTR, False):
            logger.debug(f"internal session object not tracked: {target}")
            return
        if not core.listening:
            logger.warning(
                f"dbsync is disabled; aborting listener for '{command}' command")
            return
        if command == 'u' and not core.SessionClass.object_session(
                target).is_modified(target, include_collections=False):
            logger.debug(f"updated and not modified -> no tracking: {target}")
            return

        mt = mapper.mapped_table
        if isinstance(mt, Join):
            tname = mt.right.name
        else:
            tname = mt.name

        if tname not in core.synched_models.tables:
            logger.error(f"you must track a class mapped to table {tname} "
                         "to log operations")
            return
        # one version for each operation
        # TODO: can be minimized by collecting ops in one flush queue
        try:
            call_before_tracking_fn(session, command, target)
        except SkipOperation:
            logger.info(f"skip operation for {target}")
            return
        # TODO:
        # we should try to create only one version per transaction,
        # so adding the new version should happen during the flush.
        # That is not easy, but one approach is to create a version for
        # each new session.begin and hold it until the flush/commit.
        # Another idea: the first time an operation is added to this
        # session, create a version object and pin it to the session
        # (session.current_version = version), point the operations to
        # that version, and let the flush() see a single version.
        # The problem is that the @core.session_committing decorator
        # creates a new session for each call to this function
        # => have to dig deeper
        target_session = object_session(target)

        def clear_on_flush(session, flush_context):
            session.__current_version__ = None

        if not getattr(target_session, '__current_version__', None):
            # first tracked operation in this session: create a new version,
            # pin it to the session, and clear the pin after the next flush
            target_session.__current_version__ = Version(
                created=datetime.datetime.now())
            session.add(target_session.__current_version__)
            event.listen(target_session, "after_flush", clear_on_flush)
        version = target_session.__current_version__

        logger.info(f"new version: {version.version_id}")
        pk = getattr(target, mapper.primary_key[0].name)
        op = Operation(row_id=pk,
                       content_type_id=core.synched_models.tables[tname].id,
                       command=command)
        session.add(version)
        call_after_tracking_fn(session, op, target)
        session.add(op)
        op.version = version
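
The listener above matches the signature of SQLAlchemy's after_insert/after_update/after_delete mapper events, with command ('i', 'u' or 'd') captured by an enclosing factory. A sketch of that wiring, where make_listener is an assumed factory returning the listener for a given command code:

from sqlalchemy import event

# Sketch only: make_listener is assumed to close over `command` and return
# the `listener` defined above for that command code.
def start_tracking(model_class):
    event.listen(model_class, 'after_insert', make_listener('i'))
    event.listen(model_class, 'after_update', make_listener('u'))
    event.listen(model_class, 'after_delete', make_listener('d'))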
Example 4
async def handle_push(connection: Connection,
                      session: sqlalchemy.orm.Session
                      ) -> Optional[Dict[str, int]]:
    msgs_got = 0
    version: Optional[Version] = None
    async for msg in connection.socket:
        msgs_got += 1
        msg_json = json.loads(msg)
        pushmsg = PushMessage(msg_json)
        # print(f"pushmsg: {msg}")
        if not pushmsg.operations:
            logger.warning("empty operations list in client PushMessage")
        for op in pushmsg.operations:
            logger.info(f"operation: {op}")
        # await connection.socket.send(f"answer is:{msg}")
        logger.info(f"message key={pushmsg.key}")

        latest_version_id = core.get_latest_version_id(session=session)
        logger.info(
            f"** version on server: {latest_version_id}, "
            f"version in pushmsg: {pushmsg.latest_version_id}")
        if latest_version_id != pushmsg.latest_version_id:
            exc = "version identifier isn't the latest one; " \
                  f"incoming: {pushmsg.latest_version_id}, " \
                  f"on server: {latest_version_id}"
            if latest_version_id is None:
                logger.warning(exc)
                raise PushRejected(exc)
            if pushmsg.latest_version_id is None:
                logger.warning(exc)
                raise PullSuggested(exc)
            if pushmsg.latest_version_id < latest_version_id:
                logger.warning(exc)
                raise PullSuggested(exc)
            raise PushRejected(exc)
        if not pushmsg.islegit(session):
            raise PushRejected("message isn't properly signed")

        for listener in before_push:
            listener(session, pushmsg)

        # I) detect unique constraint conflicts and resolve them if possible
        unique_conflicts = find_unique_conflicts(pushmsg, session)
        conflicting_objects = set()
        for uc in unique_conflicts:
            obj = uc['object']
            conflicting_objects.add(obj)
            for key, value in zip(uc['columns'], uc['new_values']):
                setattr(obj, key, value)
        for obj in conflicting_objects:
            make_transient(obj)  # remove from session
        for model in set(type(obj) for obj in conflicting_objects):
            pk_name = get_pk(model)
            pks = [
                getattr(obj, pk_name) for obj in conflicting_objects
                if type(obj) is model
            ]
            session.query(model).filter(getattr(model, pk_name).in_(pks)). \
                delete(synchronize_session=False)  # remove from the database
        session.add_all(conflicting_objects)  # reinsert
        session.flush()

        # II) perform the operations
        operations = [
            o for o in pushmsg.operations if o.tracked_model is not None
        ]
        post_operations: List[Tuple[Operation, SQLClass,
                                    Optional[SQLClass]]] = []
        try:
            op: Operation
            for op in operations:
                (obj,
                 old_obj) = await op.perform_async(pushmsg, session,
                                                   pushmsg.node_id,
                                                   connection.socket)

                if obj is not None:
                    # if the op has been skipped, it won't be appended for
                    # post_operation handling
                    post_operations.append((op, obj, old_obj))

                    resp = dict(type="info",
                                op=dict(
                                    row_id=op.row_id,
                                    version=op.version,
                                    command=op.command,
                                    content_type_id=op.content_type_id,
                                ))
                    call_after_tracking_fn(session, op, obj)
                    await connection.socket.send(json.dumps(resp))

        except OperationError as e:
            logger.exception(
                "Couldn't perform operation in push from node %s.",
                pushmsg.node_id)
            raise PushRejected("at least one operation couldn't be performed",
                               *e.args)

        # III) insert a new version
        if post_operations:  # only if operations have been done -> create the new version
            version = Version(created=datetime.datetime.now(),
                              node_id=pushmsg.node_id)
            session.add(version)

        # IV) insert the operations, discarding the 'order' column
        accomplished_operations = [
            op for (op, obj, old_obj) in post_operations
        ]
        for op in sorted(accomplished_operations, key=attr('order')):
            new_op = Operation()
            for k in [k for k in properties_dict(op) if k != 'order']:
                setattr(new_op, k, getattr(op, k))
            session.add(new_op)
            new_op.version = version
            session.flush()

        for op, obj, old_obj in post_operations:
            op.call_after_operation_fn(session, obj, old_obj)

        for listener in after_push:
            listener(session, pushmsg)

        # return the new version id back to the client
        logger.info(f"version is: {version}")
        if version:
            await connection.socket.send(
                json.dumps(
                    dict(type="result", new_version_id=version.version_id)))
            return {'new_version_id': version.version_id}
        else:
            await connection.socket.send(
                json.dumps(dict(type="result", new_version_id=None)))
            logger.info("sent empty result message")
            await connection.socket.close()

    logger.info("push ready")
Example 5
def handle_push(data: Dict[str, Any],
                session: Optional[Session] = None) -> Dict[str, int]:
    """
    Handle the push request and return a dictionary object to be sent
    back to the node.

    If the push is rejected, this procedure will raise a
    dbsync.server.handlers.PushRejected exception.

    *data* must be a dictionary-like object, usually the product of
    parsing a JSON string.
    """
    message: PushMessage
    try:
        message = PushMessage(data)
    except KeyError:
        raise PushRejected("request object isn't a valid PushMessage", data)
    latest_version_id = core.get_latest_version_id(session=session)
    if latest_version_id != message.latest_version_id:
        exc = "version identifier isn't the latest one; "\
            "given: %s" % message.latest_version_id
        if latest_version_id is None:
            raise PushRejected(exc)
        if message.latest_version_id is None:
            raise PullSuggested(exc)
        if message.latest_version_id < latest_version_id:
            raise PullSuggested(exc)
        raise PushRejected(exc)
    if not message.operations:
        return {}
        # raise PushRejected("message doesn't contain operations")
    if not message.islegit(session):
        raise PushRejected("message isn't properly signed")

    for listener in before_push:
        listener(session, message)

    # I) detect unique constraint conflicts and resolve them if possible
    unique_conflicts = find_unique_conflicts(message, session)
    conflicting_objects = set()
    for uc in unique_conflicts:
        obj = uc['object']
        conflicting_objects.add(obj)
        for key, value in zip(uc['columns'], uc['new_values']):
            setattr(obj, key, value)
    for obj in conflicting_objects:
        make_transient(obj)  # remove from session
    for model in set(type(obj) for obj in conflicting_objects):
        pk_name = get_pk(model)
        pks = [
            getattr(obj, pk_name) for obj in conflicting_objects
            if type(obj) is model
        ]
        session.query(model).filter(getattr(model, pk_name).in_(pks)).\
            delete(synchronize_session=False) # remove from the database
    session.add_all(conflicting_objects)  # reinsert
    session.flush()

    # II) perform the operations
    operations = [o for o in message.operations if o.tracked_model is not None]
    try:
        for op in operations:
            op.perform(message, session, message.node_id)
    except OperationError as e:
        logger.exception("Couldn't perform operation in push from node %s.",
                         message.node_id)
        raise PushRejected("at least one operation couldn't be performed",
                           *e.args)

    # III) insert a new version
    version = Version(created=datetime.datetime.now(), node_id=message.node_id)
    session.add(version)

    # IV) insert the operations, discarding the 'order' column
    for op in sorted(operations, key=attr('order')):
        new_op = Operation()
        for k in [k for k in properties_dict(op) if k != 'order']:
            setattr(new_op, k, getattr(op, k))
        session.add(new_op)
        new_op.version = version
        session.flush()

    for listener in after_push:
        listener(session, message)

    # return the new version id back to the node
    return {'new_version_id': version.version_id}
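
A sketch of exposing this handler over HTTP with Flask. The route, status codes and session policy are assumptions; PullSuggested is mapped to 409 so the client knows to pull first.

from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route("/push", methods=["POST"])
def push_view():
    session = Session()  # assumption: application sessionmaker
    try:
        result = handle_push(request.get_json(), session=session)
        session.commit()
        return jsonify(result)
    except PullSuggested as exc:
        session.rollback()
        return jsonify({'error': str(exc)}), 409  # client should pull first
    except PushRejected as exc:
        session.rollback()
        return jsonify({'error': str(exc)}), 400
    finally:
        session.close()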
Example 6
    async def run_push(
        self,
        session: Optional[sqlalchemy.orm.session.Session] = None
    ) -> Optional[int]:
        new_version_id: Optional[int]
        if not session:
            session = self.Session()

        message = self.create_push_message(session=session)

        logger.info(f"number of client operations: {len(message.operations)}")

        if not message.operations:
            logger.info("empty client operations list")
            # return None

        node = session.query(Node).order_by(Node.node_id.desc()).first()
        message.set_node(
            node
        )  # TODO to should be migrated to GUID and ordered by creation date
        logger.info(f"message key={message.key}")
        logger.info(f"message secret={message._secret}")
        message_json = message.to_json(include_operations=True)
        # message_encoded = encode_dict(PushMessage)(message_json)
        message_encoded = json.dumps(message_json,
                                     cls=SyncdbJSONEncoder,
                                     indent=4)

        # here it happens
        logger.info("sending message to server")
        await self.websocket.send(message_encoded)
        logger.info("message sent to server")
        session.commit()
        # logger.debug(f"message: {message_encoded}")
        new_version_id = None
        # accept incoming requests for payload data (optional)
        logger.debug(f"wait for message from server")
        async for msg_ in self.websocket:
            logger.debug(f"client:{self.id} msg: {msg_}")
            msg = json.loads(msg_)
            # logger.debug(f"msg: {msg}")
            if msg['type'] == "request_field_payload":
                logger.info(f"obj from server:{msg}")
                await self.send_field_payload(session, msg)
            elif msg['type'] == 'result':
                new_version_id = msg['new_version_id']
                if new_version_id is None:
                    break
            else:
                logger.debug(f"response from server:{msg}")

        logger.debug(f"closing sessions")
        # else:
        #     print("ENDE:")
        # EEEEK TODO this is to prevent sqlite blocking due to other sessions
        session.close_all()

        if new_version_id is None:
            return None

        session = self.Session()

        # because all sessions were closed above, the operations have to be
        # re-selected before they can be updated
        for op in message.operations:
            ops1 = session.query(Operation).filter(
                Operation.row_id == op.row_id).all()
            for op1 in ops1:
                op1.version_id = new_version_id

        logger.info(f"new version {new_version_id}")
        session.add(Version(version_id=new_version_id, created=datetime.now()))

        session.commit()
        return new_version_id
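
A sketch of driving run_push from an asyncio entry point. SyncClient, its constructor and connect() are assumptions about the surrounding client class that owns self.websocket and self.Session.

import asyncio

async def main():
    client = SyncClient()   # hypothetical: the class that owns run_push
    await client.connect()  # assumption: opens client.websocket
    new_version_id = await client.run_push()
    print(f"pushed; server version is now {new_version_id}")

asyncio.run(main())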