Example #1
    async def store(self, oid, old_serial, writer, obj, txn):
        assert oid is not None

        pickled = writer.serialize()  # This calls __getstate__ of obj
        if len(pickled) >= self._large_record_size:
            logger.warning(f"Large object {obj.__class__}: {len(pickled)}")
        part = writer.part
        if part is None:
            part = 0

        statement_sql = self._sql.get("CR_NAIVE_UPSERT",
                                      self._objects_table_name)
        update = False
        if not obj.__new_marker__ and obj.__serial__ is not None:
            # we should be confident this is an object update
            statement_sql = self._sql.get("CR_UPDATE",
                                          self._objects_table_name)
            update = True

        conn = await txn.get_connection()
        async with txn._lock:
            try:
                result = await conn.fetch(
                    statement_sql,
                    oid,  # The OID of the object
                    txn._tid,  # Our TID
                    len(pickled),  # Len of the object
                    part,  # Partition indicator
                    writer.resource,  # Is it a resource?
                    writer.of,  # It belongs to a main
                    old_serial,  # Old serial
                    writer.parent_id,  # Parent OID
                    writer.id,  # Traversal ID
                    writer.type,  # Guillotina type
                    pickled,  # Pickle state
                )
            except asyncpg.exceptions.UniqueViolationError as ex:
                if "duplicate key value (parent_id,id)" in ex.detail:
                    raise ConflictIdOnContainer(ex)
                raise
            except asyncpg.exceptions._base.InterfaceError as ex:
                if "another operation is in progress" in ex.args[0]:
                    raise ConflictError(
                        f"asyncpg error, another operation in progress.", oid,
                        txn, old_serial, writer)
                raise
            if update and len(result) != 1:
                # raise tid conflict error
                raise TIDConflictError(
                    f"Mismatch of tid of object being updated. This is likely "
                    f"caused by a cache invalidation race condition and should "
                    f"be an edge case. This should resolve on request retry.",
                    oid,
                    txn,
                    old_serial,
                    writer,
                )
        await txn._cache.store_object(obj, pickled)
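
The two statements differ in how they detect concurrent writers: the UPDATE variant only matches the row while its stored tid still equals the serial the caller last read, so an empty result means another transaction got there first and TIDConflictError is raised. The actual SQL comes from the storage's statement registry; the sketch below shows a plausible shape for the two statements, assuming a simplified objects table (the table layout and column names are illustrative, not Guillotina's real schema):

    # Illustrative only: table layout and column names are assumptions,
    # not the schema Guillotina actually ships.
    NAIVE_UPSERT = """
    INSERT INTO objects
        (zoid, tid, state_size, part, resource, of, otid, parent_id, id, type, state)
    VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
    ON CONFLICT (zoid)
    DO UPDATE SET
        tid = EXCLUDED.tid,
        state_size = EXCLUDED.state_size,
        state = EXCLUDED.state
    RETURNING tid"""

    # The tid guard in the WHERE clause is the optimistic-concurrency check:
    # if the row's tid no longer matches the serial we read ($7), no rows
    # come back and the caller raises TIDConflictError.
    UPDATE = """
    UPDATE objects
    SET tid = $2, state_size = $3, part = $4, resource = $5, of = $6,
        parent_id = $8, id = $9, type = $10, state = $11
    WHERE zoid = $1 AND tid = $7
    RETURNING tid"""
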
Example #2
    async def store(self, oid, old_serial, writer, obj, txn):
        assert oid is not None

        p = writer.serialize()  # This calls __getstate__ of obj
        if len(p) >= self._large_record_size:
            logger.warning(f"Large object {obj.__class__}: {len(p)}")
        part = writer.part
        if part is None:
            part = 0

        update = False
        statement_sql = NAIVE_UPSERT
        if not obj.__new_marker__ and obj._p_serial is not None:
            # we should be confident this is an object update
            statement_sql = UPDATE
            update = True

        async with txn._lock:
            smt = await txn._db_conn.prepare(statement_sql)
            try:
                result = await smt.fetch(
                    oid,                 # The OID of the object
                    txn._tid,            # Our TID
                    len(p),              # Len of the object
                    part,                # Partition indicator
                    writer.resource,     # Is it a resource?
                    writer.of,           # It belongs to a main
                    old_serial,          # Old serial
                    writer.parent_id,    # Parent OID
                    writer.id,           # Traversal ID
                    writer.type,         # Guillotina type
                    p                    # Pickle state
                )
            except asyncpg.exceptions._base.InterfaceError as ex:
                if 'another operation is in progress' in ex.args[0]:
                    conflict_summary = self.get_conflict_summary(oid, txn, old_serial, writer)
                    raise ConflictError(
                        f'asyncpg error, another operation in progress.\n{conflict_summary}')
                raise
            if update and len(result) != 1:
                # raise tid conflict error
                conflict_summary = self.get_conflict_summary(oid, txn, old_serial, writer)
                raise TIDConflictError(
                    f'Mismatch of tid of object being updated. This is likely '
                    f'caused by a cache invalidation race condition and should '
                    f'be an edge case. This should resolve on request retry.\n'
                    f'{conflict_summary}')
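
Unlike the first example, this older variant prepares the statement explicitly and fetches through the prepared-statement handle instead of passing SQL text straight to conn.fetch() (asyncpg prepares and caches statements internally either way, so the difference is largely stylistic). A minimal standalone sketch of the prepare-then-fetch pattern, assuming a reachable Postgres and a users table that exist only for this illustration:

    import asyncio

    import asyncpg


    async def main():
        # DSN and table name are placeholders for this sketch.
        conn = await asyncpg.connect(dsn="postgresql://localhost/example")
        try:
            # Explicit preparation, as in the example above; worthwhile when
            # the same statement runs many times on one connection.
            stmt = await conn.prepare("SELECT id, name FROM users WHERE id = $1")
            row = await stmt.fetchrow(42)
            print(row)
        finally:
            await conn.close()


    asyncio.run(main())
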
Example #3
    async def store(self, oid, old_serial, writer, obj, txn):
        assert oid is not None

        pickled = writer.serialize()  # This calls __getstate__ of obj
        if len(pickled) >= self._large_record_size:
            log.warning(f"Large object {obj.__class__}: {len(pickled)}")
        json_dict = await writer.get_json()
        json = ujson.dumps(json_dict)
        part = writer.part
        if part is None:
            part = 0

        update = False
        statement_sql = NAIVE_UPSERT
        if not obj.__new_marker__ and obj._p_serial is not None:
            # we should be confident this is an object update
            statement_sql = UPDATE
            update = True

        async with txn._lock:
            smt = await txn._db_conn.prepare(statement_sql)
            try:
                result = await smt.fetch(
                    oid,  # The OID of the object
                    txn._tid,  # Our TID
                    len(pickled),  # Len of the object
                    part,  # Partition indicator
                    writer.resource,  # Is it a resource?
                    writer.of,  # It belongs to a main
                    old_serial,  # Old serial
                    writer.parent_id,  # Parent OID
                    writer.id,  # Traversal ID
                    writer.type,  # Guillotina type
                    json,  # JSON catalog
                    pickled  # Pickle state
                )
            except asyncpg.exceptions.ForeignKeyViolationError:
                txn.deleted[obj._p_oid] = obj
                raise TIDConflictError(
                    'Bad value inserting into database that could be caused '
                    'by a bad cache value. This should resolve on request retry.',
                    oid, txn, old_serial, writer)
            except asyncpg.exceptions._base.InterfaceError as ex:
                if 'another operation is in progress' in ex.args[0]:
                    raise ConflictError(
                        'asyncpg error, another operation in progress.', oid,
                        txn, old_serial, writer)
                raise
            except asyncpg.exceptions.DeadlockDetectedError:
                raise ConflictError('Deadlock detected.', oid, txn,
                                    old_serial, writer)
            if len(result) != 1 or result[0]['count'] != 1:
                if update:
                    # raise tid conflict error
                    raise TIDConflictError(
                        'Mismatch of tid of object being updated. This is likely '
                        'caused by a cache invalidation race condition and should '
                        'be an edge case. This should resolve on request retry.',
                        oid, txn, old_serial, writer)
                else:
                    log.error('Incorrect response count from database update. '
                              'This should not happen. tid: {}'.format(
                                  txn._tid))
        await txn._cache.store_object(obj, pickled)
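
Relative to the previous example, this version also stores a JSON projection of the object (serialized with ujson) for catalog queries, and maps two more asyncpg failure modes onto retryable application errors: a foreign-key violation (usually a stale cache entry pointing at a deleted parent) and a detected deadlock. The mapping pattern in isolation might look like the sketch below, with stand-in exception classes replacing Guillotina's own:

    import asyncpg


    # Hypothetical stand-ins for guillotina's conflict exceptions.
    class ConflictError(Exception):
        pass


    class TIDConflictError(ConflictError):
        pass


    async def fetch_with_conflict_mapping(conn, sql, *args):
        """Translate low-level asyncpg errors into retryable conflicts."""
        try:
            return await conn.fetch(sql, *args)
        except asyncpg.exceptions.ForeignKeyViolationError as ex:
            # A referenced row vanished underneath us, typically a stale
            # cache value; the request should be retried from scratch.
            raise TIDConflictError("stale reference, retry the request") from ex
        except asyncpg.exceptions.DeadlockDetectedError as ex:
            # Two transactions locked rows in opposite order; one must retry.
            raise ConflictError("deadlock detected, retry the request") from ex
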
Example #4
    async def store(self, oid, old_serial, writer, obj, txn):
        assert oid is not None

        pickled = writer.serialize()  # This calls __getstate__ of obj
        if len(pickled) >= self._large_record_size:
            log.info(f"Large object {obj.__class__}: {len(pickled)}")
        if self._store_json:
            json_dict = await writer.get_json()
            json = orjson.dumps(json_dict).decode("utf-8")
        else:
            json = None
        part = writer.part
        if part is None:
            part = 0

        update = False
        statement_sql = self._sql.get("NAIVE_UPSERT", self._objects_table_name)
        if not obj.__new_marker__ and obj.__serial__ is not None:
            # we should be confident this is an object update
            statement_sql = self._sql.get("UPDATE", self._objects_table_name)
            update = True

        conn = await txn.get_connection()
        async with txn._lock:
            try:
                result = await conn.fetch(
                    statement_sql,
                    oid,  # The OID of the object
                    txn._tid,  # Our TID
                    len(pickled),  # Len of the object
                    part,  # Partition indicator
                    writer.resource,  # Is it a resource?
                    writer.of,  # It belongs to a main
                    old_serial,  # Old serial
                    writer.parent_id,  # Parent OID
                    writer.id,  # Traversal ID
                    writer.type,  # Guillotina type
                    json,  # JSON catalog
                    pickled,  # Pickle state
                )
            except asyncpg.exceptions.UniqueViolationError as ex:
                if "Key (parent_id, id)" in ex.detail or "Key (of, id)" in ex.detail:
                    raise ConflictIdOnContainer(ex)
                raise
            except asyncpg.exceptions.ForeignKeyViolationError:
                txn.deleted[obj.__uuid__] = obj
                raise TIDConflictError(
                    f"Bad value inserting into database that could be caused "
                    f"by a bad cache value. This should resolve on request retry.",
                    oid,
                    txn,
                    old_serial,
                    writer,
                )
            except asyncpg.exceptions._base.InterfaceError as ex:
                if "another operation is in progress" in ex.args[0]:
                    raise ConflictError(
                        f"asyncpg error, another operation in progress.", oid, txn, old_serial, writer
                    )
                raise
            except asyncpg.exceptions.DeadlockDetectedError:
                raise ConflictError(f"Deadlock detected.", oid, txn, old_serial, writer)
            if len(result) != 1 or result[0]["count"] != 1:
                if update:
                    # raise tid conflict error
                    raise TIDConflictError(
                        f"Mismatch of tid of object being updated. This is likely "
                        f"caused by a cache invalidation race condition and should "
                        f"be an edge case. This should resolve on request retry.",
                        oid,
                        txn,
                        old_serial,
                        writer,
                    )
                else:
                    log.error(
                        "Incorrect response count from database update. "
                        "This should not happen. tid: {}".format(txn._tid)
                    )
        await txn._cache.store_object(obj, pickled)
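
This latest variant makes the JSON projection optional behind self._store_json and switches from ujson to orjson, whose dumps() returns bytes (hence the decode to str). All four variants share the same recovery strategy: conflict errors propagate to the request layer, which replays the whole request. A caller-side retry loop could look like the following sketch (with_retries and the ConflictError stand-in are invented for illustration):

    import asyncio


    class ConflictError(Exception):
        """Stand-in for guillotina's retryable conflict exceptions."""


    async def with_retries(handler, attempts=3):
        """Re-run a request handler when a storage conflict surfaces."""
        for attempt in range(attempts):
            try:
                return await handler()
            except ConflictError:
                if attempt == attempts - 1:
                    raise
                # Small backoff before replaying the request from scratch.
                await asyncio.sleep(0.05 * (attempt + 1))
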