# Example 1
def parse_update(data, length):
    """Parse a MongoDB OP_UPDATE wire-protocol message.

    Wire layout (after the 16-byte MsgHeader):
        int32    ZERO               // 0 - reserved for future use
        cstring  fullCollectionName // "dbname.collectionname"
        int32    flags              // bit vector - see below
        document selector           // the query to select the document
        document update             // specification of the update to perform
    """
    # Skip the 16-byte MsgHeader; the first int32 is the reserved ZERO field.
    zero, pos = bson._get_int(data, 16)
    collection, pos = bson._get_c_string(data, pos)
    flags, pos = bson._get_int(data, pos)
    selector, update = "", ""
    try:
        # Exactly two documents are expected: selector, then update spec.
        docs = bson.decode_all(data[pos:length])
        selector, update = docs
    except Exception:
        # Fix: logger.exception() requires a message argument; calling it
        # bare raises TypeError inside the handler and masks the original
        # decode failure.  Message style matches the sibling parsers.
        logger.exception("exception on bson decode")

    # flags:
    # 0     Upsert      If set, insert the supplied object when no matching document is found.
    # 1     MultiUpdate If set, update all matching objects; otherwise only the first match.
    # 2-31  Reserved    Must be set to 0.
    upsert = check_bit(flags, 0)
    multi_update = check_bit(flags, 1)

    return Operation(operation=OP_UPDATE,
                     upsert=upsert,
                     multi_update=multi_update,
                     collection=collection,
                     selector=selector,
                     update=update)
# Example 2
def parse_delete(data, length):
    """Parse a MongoDB OP_DELETE wire-protocol message.

    Wire layout (after the 16-byte MsgHeader):
        int32    ZERO               // 0 - reserved for future use
        cstring  fullCollectionName // "dbname.collectionname"
        int32    flags              // bit vector - see below
        document selector           // query object
    """
    # Skip the 16-byte MsgHeader; the first int32 is the reserved ZERO field.
    _zero, pos = bson._get_int(data, 16)
    collection, pos = bson._get_c_string(data, pos)
    flags, pos = bson._get_int(data, pos)
    # flags:
    # 0     SingleRemove  If set, remove only the first matching document;
    #                     otherwise all matching documents are removed.
    # 1-31  Reserved      Must be set to 0.
    single_remove = check_bit(flags, 0)
    try:
        selector = bson.decode_all(data[pos:length])[0]
    except Exception:
        selector = ""
        logger.exception("exception on bson decode")
    return Operation(operation=OP_DELETE,
                     collection=collection,
                     single_remove=single_remove,
                     selector=selector)
# Example 3
def parse_query(data, length):
    """Parse a MongoDB OP_QUERY wire-protocol message.

    Wire layout (after the 16-byte MsgHeader):
        int32     flags                   // bit vector of query options
        cstring   fullCollectionName      // "dbname.collectionname"
        int32     numberToSkip            // number of documents to skip
        int32     numberToReturn          // docs to return in first OP_REPLY batch
        document  query                   // query object
      [ document  returnFieldsSelector; ] // optional field projection
    """
    # Query option flag bits (bit 0 and 8-31 are reserved):
    # 1 TailableCursor  cursor stays open after the last data is retrieved
    # 2 SlaveOk         allow query of replica slave
    # 3 OplogReplay     internal replication use only
    # 4 NoCursorTimeout disable the server's idle-cursor timeout
    # 5 AwaitData       with TailableCursor: block briefly at end of data
    # 6 Exhaust         stream results in multiple "more" packages
    # 7 Partial         allow partial results when some shards are down
    flags, pos = bson._get_int(data, 16)
    option_bits = {
        "tailable_cursor": 1,
        "slave_ok": 2,
        "oplog_replay": 3,
        "no_cursor_timeout": 4,
        "await_data": 5,
        "exhaust": 6,
        "partial": 7,
    }
    options = {name: check_bit(flags, bit) for name, bit in option_bits.items()}

    collection, pos = bson._get_c_string(data, pos)
    number_to_skip, pos = bson._get_int(data, pos)
    number_to_return, pos = bson._get_int(data, pos)
    fields_to_return = None
    query = ""
    try:
        docs = bson.decode_all(data[pos:length])
        query = docs[0]
    except bson.InvalidBSON:
        docs = []
        logger.exception("exception on bson decode")

    # A second document, when present, is the optional returnFieldsSelector.
    if len(docs) == 2:
        fields_to_return = docs[1]
    return Operation(operation=OP_QUERY,
                     fields_to_return=fields_to_return,
                     number_to_skip=number_to_skip,
                     number_to_return=number_to_return,
                     query=query,
                     **options)
# Example 4
def parse_kill_cursors(data, length):
    """Parse a MongoDB OP_KILL_CURSORS wire-protocol message.

    Wire layout (after the 16-byte MsgHeader):
        int32  ZERO               // 0 - reserved for future use
        int32  numberOfCursorIDs  // number of cursorIDs in message
        int64* cursorIDs          // sequence of cursorIDs to close
    """
    # Skip the 16-byte MsgHeader; the first int32 is the reserved ZERO field.
    _zero, pos = bson._get_int(data, 16)
    number_of_cursor_ids, pos = bson._get_int(data, pos)
    cursor_ids = []
    # Each cursor id is a consecutive little-endian int64.
    while len(cursor_ids) < number_of_cursor_ids:
        cursor_id, pos = bson._get_long(data, pos)
        cursor_ids.append(cursor_id)
    return Operation(operation=OP_KILL_CURSORS,
                     number_of_cursor_ids=number_of_cursor_ids,
                     cursor_ids=cursor_ids)
# Example 5
def parse_get_more(data, length):
    """Parse a MongoDB OP_GET_MORE wire-protocol message.

    Wire layout (after the 16-byte MsgHeader):
        int32   ZERO               // 0 - reserved for future use
        cstring fullCollectionName // "dbname.collectionname"
        int32   numberToReturn     // number of documents to return
        int64   cursorID           // cursorID from the OP_REPLY
    """
    # Skip the 16-byte MsgHeader; the first int32 is the reserved ZERO field.
    _zero, pos = bson._get_int(data, 16)
    collection, pos = bson._get_c_string(data, pos)
    number_to_return, pos = bson._get_int(data, pos)
    cursor_id, pos = bson._get_long(data, pos)

    # Keyword names numberToReturn/cursorID are kept as-is for callers
    # that read them off the resulting Operation.
    return Operation(operation=OP_GET_MORE,
                     collection=collection,
                     numberToReturn=number_to_return,
                     cursorID=cursor_id)
# Example 6
def parse_insert(data, length):
    """Parse a MongoDB OP_INSERT wire-protocol message.

    Wire layout (after the 16-byte MsgHeader):
        int32     flags              // bit vector - see below
        cstring   fullCollectionName // "dbname.collectionname"
        document* documents          // one or more documents to insert
    """
    flags, pos = bson._get_int(data, 16)
    # flags:
    # 0     ContinueOnError  If set, a bulk insert keeps processing after one
    #                        document fails (e.g. duplicate _id); lastError is
    #                        set if any insert fails. (new in 1.9.1)
    # 1-31  Reserved         Must be set to 0.
    continue_on_error = check_bit(flags, 0)
    collection, pos = bson._get_c_string(data, pos)
    try:
        # Fix: bound the slice by `length` like every other parser here, so
        # bytes past the end of this message are never decoded as documents.
        documents = bson.decode_all(data[pos:length])
    except bson.InvalidBSON:
        documents = []
        logger.exception("exception on bson decode")

    return Operation(operation=OP_INSERT,
                     collection=collection,
                     continue_on_error=continue_on_error,
                     documents=documents)
# Example 7
def parse_reply(data, length):
    """Parse a MongoDB OP_REPLY wire-protocol message.

    Wire layout (after the 16-byte MsgHeader):
        int32     responseFlags  // bit vector
        int64     cursorID       // cursor id if client needs to do get-mores
        int32     startingFrom   // where in the cursor this reply is starting
        int32     numberReturned // number of documents in the reply
        document* documents
    """
    flags, pos = bson._get_int(data, 16)
    # Fix: call _get_long the same way as the other call sites in this file;
    # as_class/tz_aware/uuid_subtype are document-decoding options that do
    # not apply to reading a raw int64 cursor id.
    cursor_id, pos = bson._get_long(data, pos)
    starting_from, pos = bson._get_int(data, pos)
    number_returned, pos = bson._get_int(data, pos)
    try:
        documents = bson.decode_all(data[pos:length])
    except Exception:
        documents = []
        logger.exception("exception on bson decode in parse_reply")
        # Dump the raw payload so the malformed reply can be inspected.
        logger.info(repr(data[pos:length]))

    return Operation(operation=OP_REPLY, cursor_id=cursor_id,
                     starting_from=starting_from, number_returned=number_returned,
                     documents=documents)