def round_trip(self, doc):
    """Assert that *doc* survives BSON <-> JSON conversion in both directions."""
    bson_bytes = to_bson(doc)
    self.assertEqual(bson_bytes, bsonjs.loads(bsonjs.dumps(bson_bytes)))
    # Check compatibility between bsonjs and json_util in both directions.
    strict = json_util.STRICT_JSON_OPTIONS
    self.assertEqual(
        doc,
        json_util.loads(bsonjs.dumps(bson_bytes), json_options=strict))
    self.assertEqual(
        bson_bytes,
        bsonjs.loads(json_util.dumps(doc, json_options=strict)))
def main():
    """Decode the input BSON file, post-process it into JSON, and download files.

    Side effects: writes the raw decoded JSON to ``resultf()``, the parsed
    structure to ``jsonresultf()``, and triggers ``download_files``.
    """
    with open(inputf(), 'rb') as f:
        decoded_doc = bsonjs.dumps(f.read())
    # bsonjs can emit bare ``inf`` tokens, which are not valid JSON; quote
    # them so json.loads() below does not choke.
    decoded_doc = decoded_doc.replace(" inf ", ' "inf" ').replace(
        " inf,", ' "inf",')
    with open(resultf(), "w", encoding='utf8') as of:
        # write(), not writelines(): writelines() on a str iterates and
        # writes it one character at a time.
        of.write(decoded_doc)
    data = json.loads(decoded_doc)
    parsed_data = {}
    for key, obj in data.items():
        print([key])  # progress/debug output
        if isinstance(obj, list):
            buf = []
            data_walker(buf, obj, 0)
        else:
            buf = {}
            # Only dict entries carrying a "Name" field are walked; anything
            # else is recorded as an empty dict.
            if isinstance(obj, dict) and obj.get("Name") is not None:
                data_walker(buf, obj, 0)
        parsed_data[key] = buf
    with open(jsonresultf(), "w", encoding='utf8') as of:
        # json.dump streams straight to the file — no intermediate string.
        json.dump(parsed_data, of, indent=4, sort_keys=False)
    download_files(parsed_data)
def process_callback(probe, hit):
    """Handle one probe hit: count BSON errors or try to decode the hit's BSON payload.

    NOTE(review): ``self`` is not a parameter here — this callback appears to
    be defined inside a method and captures ``self`` from the enclosing
    scope; confirm against the full file.
    """
    if __name__ == "__main__":
        with self.lock:
            # print("[", hit.ns, "] [", hit.tid, "]", probe)
            # TODO: abstract away bson logic/ make this less awk
            # check for errors:
            errname = "bson_err"
            if errname in hit.args:
                # The probe reported an error code; record its string form.
                err = hit.args[errname]
                print("ERROR", error_strings[err])
                self.counters["BsonErrors"].encounter(error_strings[err])
            # we can have at most one bson per probe
            elif self._probe_has_bson(probe):
                bson = hit.args["bson"]
                sz = hit.args["bson_sz"]
                # attempt to parse long string as bson; on success the raw
                # payload in hit.args is replaced by its JSON rendering.
                try:
                    rbson = raw_bson.RawBSONDocument(bson)
                    sbson = dumps(rbson.raw)
                    hit.args["bson"] = sbson
                    self.counters["BsonErrors"].encounter("success")
                except Exception as e:
                    #out = ""
                    #for b in bson:
                    #    out += str(hex(b))
                    #print(out)
                    #print(e)
                    self.counters["BsonErrors"].encounter("BAD_BSON")
def process_callback(probe, hit):
    """Record the hit's aggregation query under its opCtx, then print when ready."""
    op_ctx = hit.args['opCtx']
    with FindToAggTimeTable.classLock:
        FindToAggTimeTable.dataDict[op_ctx] = {
            'aggQuery': dumps(hit.args['aggQuery']),
        }
    print_if_ready(op_ctx)
def export(collection):
    """
    Exports a MongoDB collection's documents to standard JSON and then
    outputs it to stdout.
    """
    out = sys.stdout.buffer
    for document in collection.find():
        bson_json = bsonjs.dumps(document.raw)
        json_object = json.loads(bson_json)
        # sys.stdout.buffer accepts bytes only; json.dumps returns str, so
        # encode it (the original passed the str and raised TypeError).
        out.write(json.dumps(convert(json_object)).encode('utf-8'))
        out.write(b"\n")
def beginQueryHit(cpu, data, size):
    """Perf-buffer callback: decode a BeginQuery event and print its query BSON.

    The original body was full of debug prints and ended in ``assert False``,
    which made everything after it unreachable (except under ``python -O``);
    that scaffolding is removed here.
    """
    event = ct.cast(data, ct.POINTER(BeginQuery)).contents
    # Take exactly queryObjSz bytes of the payload. bytes(event.bson) works
    # on a ctypes array; the old bytes(ct.cast(..., POINTER(c_byte))) on a
    # bare pointer did not.
    bson = bytes(event.bson)[:event.queryObjSz]
    rbson = raw_bson.RawBSONDocument(bson)
    print("Namespace: ", str(event.nss, 'utf-8'))
    print(" had query ", dumps(rbson.raw))
    print(" which returned ", event.nreturn, " and skipped ", event.nskip)
def process_callback(probe, hit):
    """Handle one probe hit: tally error codes or try to decode the BSON payload.

    NOTE(review): ``self`` is captured from an enclosing scope — this
    callback appears to be defined inside a method; confirm in the full file.
    """
    if __name__ == "__main__":
        # ``with`` guarantees the lock is released even if printing or BSON
        # parsing raises; the bare acquire()/release() pair did not.
        with self.lk:
            print("----", probe, "----")
            # check for errors reported by the probe:
            errname = "bson_err"
            if errname in hit.args:
                err = hit.args[errname]
                print("ERROR", error_strings[err])
                if err == errors["KERNEL_FAULT"]:
                    self.kernel_faults += 1
                elif err == errors["KEY_ERROR"]:
                    self.key_errs += 1
                else:
                    self.others += 1
            else:
                ptr = hit.args["ptr"]
                bson = hit.args["bson"]
                sz = hit.args["bson_sz"]
                print("BSON REC'VED: [{}] [{}/{} bytes]".format(
                    ptr, len(bson), sz))
                try:
                    rbson = raw_bson.RawBSONDocument(bson)
                    print(dumps(rbson.raw))
                    self.successful += 1
                except Exception as e:
                    # Hex-dump the bad payload; join avoids the quadratic
                    # ``out += hex(b)`` loop of the original.
                    print("".join(hex(b) for b in bson))
                    print(e)
                    self.bad_bson += 1
def process_callback(probe, hit):
    """Record namespace, query BSON, and limits for a query-begin probe hit."""
    opCtx = hit.args['opCtx']
    with QueryOpBeginTimeTable.classLock:
        QueryOpBeginTimeTable.dataDict[opCtx] = dict()
        nss = str(hit.args['nss'], 'utf-8')
        QueryOpBeginTimeTable.dataDict[opCtx]['nss'] = nss
        try:
            QueryOpBeginTimeTable.dataDict[opCtx]['bson'] = dumps(
                raw_bson.RawBSONDocument(hit.args['bson']).raw)
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; decode failures are still best-effort.
        except Exception:
            # TODO: need to determine why we read out invalid BSON at times.
            # Ideas include concurrency issues (read while writing), or a
            # subtle map usage error (like needing a per-cpu array map vs
            # just an array map). Haven't experienced an invalid BSON issue
            # with other probes here though, which is interesting.
            print("THIS IS INVALID BSON: ")
            print(hit.args['bson'].hex())
        # -1 is the probe's sentinel for "not set"; only record real values.
        if hit.args['ntoreturn'] != -1:
            QueryOpBeginTimeTable.dataDict[opCtx][
                'ntoreturn'] = hit.args['ntoreturn']
        if hit.args['ntoskip'] != -1:
            QueryOpBeginTimeTable.dataDict[opCtx][
                'ntoskip'] = hit.args['ntoskip']
    print_if_ready(opCtx)
def get():
    """Return the first document of the ``users`` collection as a JSON string."""
    users = MongoUtils.get_db()['users']
    return bsonjs.dumps(users.find_one().raw)
def get_data():
    """Return the requested person's data, BSON-encoded and rendered as JSON."""
    person_id = request.args.get('_id')
    habits = flask_back.DB.parser.get_person_data(person_id)
    encoded = bson.BSON.encode({'results': habits})
    return bsonjs.dumps(encoded)
#!/usr/bin/env python3
"""Dump a BSON file as pretty-printed JSON, redacting the embedded pcap blob."""
import bsonjs
import os
import sys
import json

if len(sys.argv) < 2:
    print("Missing argument")
    print(f"Usage : {os.path.basename(sys.argv[0])} <bson_file>")
    # sys.exit, not the exit() builtin (which is for interactive sessions
    # and may be absent under -S); exit status preserved.
    sys.exit(-1)

bson_file_path = sys.argv[1]
with open(bson_file_path, "rb") as bson_file:
    bson_bytes = bson_file.read()

bson_string = bsonjs.dumps(bson_bytes)  # fixed variable typo: was ``beson_string``
json_formatted = json.loads(bson_string)
# The pcap payload can be huge/binary; redact it before printing.
json_formatted["pcap"]["$binary"] = "<REMOVED>"
print(json.dumps(json_formatted, indent=4, sort_keys=True))
# NOTE(review): this looks like a code-generation template — ``${KEY}`` is
# substituted before this becomes valid Python. Left logic untouched.
def delete${KEY}(ID):  # noqa: E501
    """Delete the ${KEY} instance based on ID # noqa: E501

    :param ID:
    :type ID: str

    :rtype: None
    """
    skey = '{}|{}'.format(KEY, ID)
    # Drop both the per-instance and the collection-level cache entries.
    r.delete(skey)
    r.delete(KEY)
    q = {'_id': ID}
    doc_mongo = collection.find_one_and_delete(q)
    doc_str = bsonjs.dumps(doc_mongo.raw)
    qKey='{}{}'.format(KEY, dpath(json.loads(doc_str)))
    # Publish the deleted document so downstream consumers can react to it.
    publisher(doc_str, routing_key='{}.delete'.format(qKey), retry=True,
              declare=createQueues(qKey), headers=q)


def get_all${KEY}(offset=0, limit=20, q=None, p=None, sort=None, order=1):  # noqa: E501
    """Get the list of all ${KEY} # noqa: E501

    :param offset: The number of items to skip before starting to collect the result set.
    :type offset: int
    :param limit: The numbers of items to return.
    :type limit: int
def process_callback(probe, hit):
    """Store the find command's BSON under its opCtx, then print when ready."""
    raw = hit.args['bson']
    ctx = hit.args["opCtx"]
    with FindCmdTimeTable.classLock:
        FindCmdTimeTable.dataDict[ctx] = dumps(raw)
    print_if_ready(ctx)
def bsonjs_dumps(doc):
    """Provide same API as json_util.dumps"""
    encoded = to_bson(doc)
    return bsonjs.dumps(encoded)
def test_dumps_multiple_bson_documents(self):
    """dumps() should decode only the first document in a multi-document buffer."""
    json_str = '{ "test" : "me" }'
    single = bsonjs.loads(json_str)
    doubled = single + single
    self.assertEqual(json_str, bsonjs.dumps(doubled))