Example 1
def test_reconstitute_multiple_schemas(self):
     fdp = FileDescriptorProto()
     fdp.name = 'test_schemas'
     mt1 = fdp.message_type.add()
     mt1.name = 'test1'
     f1 = mt1.field.add()
     f1.name = 'a'
     f1.number = 1
     f1.type = FieldDescriptorProto.TYPE_UINT32
     f1.label = FieldDescriptorProto.LABEL_OPTIONAL
     mt2 = fdp.message_type.add()
     mt2.name = 'test2'
     f2 = mt2.field.add()
     f2.name = 'b'
     f2.number = 1
     f2.type = FieldDescriptorProto.TYPE_STRING
     f2.label = FieldDescriptorProto.LABEL_OPTIONAL
     bytes = fdp.SerializeToString()
     x = MessageBuilder()
     classes = x.reconstitute_file_from_bytes(bytes)
     Test1 = classes[0]
     Test2 = classes[1]
     test1 = Test1()
     test1.a = 42
     test2 = Test2()
     test2.b = 'Bonsai Rules!!!'
     self.assertEqual(42, test1.a)
     self.assertEqual('Bonsai Rules!!!', test2.b)
Example 2
def test_record_writer(writer_path):
    """
    Record writer.
    """
    fwriter = record.RecordWriter()
    if not fwriter.open(writer_path):
        print("writer open failed!")
        return
    print("+++ begin to write ...")
    fwriter.write_channel(CHAN_1, MSG_TYPE, STR_10B)
    fwriter.write_message(CHAN_1, STR_10B, 1000)

    msg = SimpleMessage()
    msg.text = "AAAAAA"

    file_desc = msg.DESCRIPTOR.file
    proto = FileDescriptorProto()
    file_desc.CopyToProto(proto)
    proto.name = file_desc.name
    desc_str = proto.SerializeToString()

    fwriter.write_channel('chatter_a', msg.DESCRIPTOR.full_name, desc_str)
    fwriter.write_message('chatter_a', msg, 998, False)
    fwriter.write_message("chatter_a", msg.SerializeToString(), 999)

    fwriter.close()
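
For the reverse direction, here is a minimal read-back sketch. It assumes the companion RecordReader class from the same record module, whose read_messages() generator yields the channel name, serialized message bytes, message type, and timestamp (as in Apollo's cyber_py examples):

def test_record_reader(reader_path):
    """
    Record reader sketch (assumes record.RecordReader behaves as described).
    """
    freader = record.RecordReader(reader_path)
    for channel, data, msg_type, timestamp in freader.read_messages():
        # `data` holds the raw serialized message; parse it with the
        # matching generated class when one is available.
        print(channel, msg_type, timestamp, len(data))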
Example 3
 def _file_descriptor_response(
     self,
     file_descriptor: FileDescriptor,
 ) -> ServerReflectionResponse:
     proto = FileDescriptorProto()
     file_descriptor.CopyToProto(proto)  # type: ignore
     return ServerReflectionResponse(
         file_descriptor_response=FileDescriptorResponse(
             file_descriptor_proto=[proto.SerializeToString()], ), )
 def _add_file_from_response(
         self, file_descriptor: FileDescriptorResponse) -> None:
     protos: List[bytes] = file_descriptor.file_descriptor_proto
     for proto in protos:
         desc = FileDescriptorProto()
         desc.ParseFromString(proto)
         if desc.name not in self._known_files:
             self._logger.info("Loading descriptors from file: %s",
                               desc.name)
             self._known_files.add(desc.name)
             self.Add(desc)
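
On the client side, the descriptor bytes gathered this way can be turned into usable message classes with the stock protobuf APIs. A minimal sketch, assuming `protos` is the list of file_descriptor_proto byte strings and 'my.pkg.MyMessage' is a placeholder message name (files must be added to the pool after their dependencies):

from google.protobuf.descriptor_pb2 import FileDescriptorProto
from google.protobuf.descriptor_pool import DescriptorPool
from google.protobuf.message_factory import MessageFactory

pool = DescriptorPool()
for proto_bytes in protos:
    fdp = FileDescriptorProto()
    fdp.ParseFromString(proto_bytes)
    pool.Add(fdp)

desc = pool.FindMessageTypeByName('my.pkg.MyMessage')
MyMessage = MessageFactory(pool).GetPrototype(desc)
msg = MyMessage()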
Example 5
 def register_message(self, file_desc):
     """
     Register a proto message descriptor file, together with its dependencies.
     """
     for dep in file_desc.dependencies:
         self.register_message(dep)
     proto = FileDescriptorProto()
     file_desc.CopyToProto(proto)
     proto.name = file_desc.name
     desc_str = proto.SerializeToString()
     _CYBER_NODE.PyNode_register_message(self.node, desc_str)
Example 6
 def register_message(self, file_desc):
     """
     Register a proto message descriptor file, together with its dependencies.
     """
     for dep in file_desc.dependencies:
         self.register_message(dep)
     proto = FileDescriptorProto()
     file_desc.CopyToProto(proto)
     proto.name = file_desc.name
     desc_str = proto.SerializeToString()
     _CYBER.PyNode_register_message(self.node, desc_str)
Example 7
    def discover(self, request, context):
        logger.info("discovering.")
        pprint(request)
        descriptor_set = FileDescriptorSet()
        for entity in self.event_sourced_entities + self.action_protocol_entities:
            logger.info(f"entity: {entity.name()}")
            for descriptor in entity.file_descriptors:
                logger.info(f"discovering {descriptor.name}")
                logger.info(f"SD: {entity.service_descriptor.full_name}")
                from_string = FileDescriptorProto.FromString(
                    descriptor.serialized_pb)
                descriptor_set.file.append(from_string)

        for dep_file in [
                "google/protobuf/empty.proto",
                "cloudstate/entity_key.proto",
                "cloudstate/eventing.proto",
                "google/protobuf/descriptor.proto",
                "google/api/annotations.proto",
                "google/api/http.proto",
                "google/api/httpbody.proto",
                "google/protobuf/any.proto",
        ]:
            descriptor_set.file.append(
                FileDescriptorProto.FromString(
                    Default().FindFileByName(dep_file).serialized_pb))
        spec = entity_pb2.EntitySpec(
            service_info=entity_pb2.ServiceInfo(
                service_name="",
                service_version="0.1.0",
                service_runtime="Python " + platform.python_version() + " [" +
                platform.python_implementation() + " " +
                platform.python_compiler() + "]",
                support_library_name="cloudstate-python-support",
                support_library_version="0.1.0",
            ),
            entities=[
                entity_pb2.Entity(
                    entity_type=entity.entity_type(),
                    service_name=entity.service_descriptor.full_name,
                    persistence_id=entity.persistence_id,
                ) for entity in self.event_sourced_entities +
                self.action_protocol_entities
            ],
            proto=descriptor_set.SerializeToString(),
        )
        return spec
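
On the receiving side, the `proto` field of the returned EntitySpec can be decoded back into descriptors with the stock FileDescriptorSet message; a minimal sketch:

from google.protobuf.descriptor_pb2 import FileDescriptorSet

fds = FileDescriptorSet.FromString(spec.proto)
for file_proto in fds.file:
    print(file_proto.name, file_proto.package)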
Example 8
 def write_message(self, channel_name, data, time, raw=True):
     """
     Write a message: channel name, message (raw bytes or proto), write time.
     """
     if raw:
         return _CYBER_RECORD.PyRecordWriter_WriteMessage(self.record_writer,
                 channel_name, data, time, "")
     else:
         file_desc = data.DESCRIPTOR.file
         proto = FileDescriptorProto()
         file_desc.CopyToProto(proto)
         proto.name = file_desc.name
         desc_str = proto.SerializeToString()
         return _CYBER_RECORD.PyRecordWriter_WriteMessage(self.record_writer,
                 channel_name, data.SerializeToString(), time, desc_str)
Example 9
 def write_message(self, channel_name, data, time, raw=True):
     """
     Write a message: channel name, message (raw bytes or proto), write time.
     """
     if raw:
         return _CYBER_RECORD.PyRecordWriter_WriteMessage(self.record_writer,
                 channel_name, data, time, "")
     else:
         file_desc = data.DESCRIPTOR.file
         proto = FileDescriptorProto()
         file_desc.CopyToProto(proto)
         proto.name = file_desc.name
         desc_str = proto.SerializeToString()
         return _CYBER_RECORD.PyRecordWriter_WriteMessage(self.record_writer,
                 channel_name, data.SerializeToString(), time, desc_str)
Example 10
def parseFromString(pbStr):
    fdp = FileDescriptorProto()

    dp = fdp.message_type.add()
    dp.name = 'tb1'
    fieldList = [
        {
            "id": 1,
            "name": "id",
            "type": FieldDescriptor.TYPE_SINT32
        },
        #        {"id":2, "name":"name", "type":FieldDescriptor.TYPE_STRING},
        {
            "id": 3,
            "name": "addr",
            "type": FieldDescriptor.TYPE_STRING
        }
    ]

    for field in fieldList:
        fieldProto = dp.field.add()
        fieldProto.number = field["id"]
        fieldProto.type = field['type']
        fieldProto.name = field['name']

    dtop = MakeDescriptor(dp)
    factory = Default()
    factory.pool.AddDescriptor(dtop)
    MyProtoClass = factory.GetPrototype(dtop)
    myproto_instance = MyProtoClass()
    myproto_instance.ParseFromString(pbStr)
    # print(dir(myproto_instance))
    ss = myproto_instance.SerializeToString()
    print(ss == pbStr)
    return myproto_instance
Example 11
 def test_reconstitute_single_schema(self):
     fdp = FileDescriptorProto()
     fdp.name = 'test_schemas'
     mt = fdp.message_type.add()
     mt.name = 'tests'
     f1 = mt.field.add()
     f1.name = 'a'
     f1.number = 1
     f1.type = FieldDescriptorProto.TYPE_UINT32
     f1.label = FieldDescriptorProto.LABEL_OPTIONAL
     bytes = mt.SerializeToString()
     x = MessageBuilder()
     Test = x.reconstitute_from_bytes(bytes)
     test = Test()
     test.a = 42
     self.assertEqual(42, test.a)
Example 12
async def test_file_by_filename_response(channel):
    r1, r2 = await ServerReflectionStub(channel).ServerReflectionInfo([
        ServerReflectionRequest(file_by_filename=DESCRIPTOR.name, ),
        ServerReflectionRequest(file_by_filename='my/missing.proto', ),
    ])

    proto_bytes, = r1.file_descriptor_response.file_descriptor_proto
    dummy_proto = FileDescriptorProto()
    dummy_proto.ParseFromString(proto_bytes)
    assert dummy_proto.name == DESCRIPTOR.name
    assert dummy_proto.package == DESCRIPTOR.package

    assert r2 == ServerReflectionResponse(error_response=ErrorResponse(
        error_code=5,
        error_message='not found',
    ), )
Example 13
 def __init__(self, name=None):
     """
     Creates a message builder that will create a named or anonymous
     message.
     Args:
         name: The name of the message to create. If not provided or
               set to None, the name is set to
               'anonymous_message_XXXXXXXX_XXXX_XXXX_XXXX_XXXXXXXXXXXX',
               where each X is a random hex digit.
     Returns:
         nothing
     """
     self._name = name or 'anonymous_message_{}'.format(generate_guid())
     self._file_descriptor_name = 'schema_containing_{}'.format(self._name)
     self._package = 'bonsai.proto'
     self._full_name = '{}.{}'.format(self._package, self._name)
     self._fields = {}
     self._current_field_name = ''
     self._current_field_type = None
     self._current_field_is_array = False
     self._factory = MessageFactory()
     inkling_file_descriptor = FileDescriptorProto()
     inkling_types_pb2.DESCRIPTOR.CopyToProto(inkling_file_descriptor)
     self._factory.pool.Add(inkling_file_descriptor)
     self._fields_to_resolve = {}
Example 14
def _make_descriptor(descriptor_proto, package, full_name):
    """
    We essentially need to re-implement Protobuf's MakeDescriptor() call (the
    implementation backed by the C++ API) here. The version provided by
    Google creates a file descriptor proto with a GUID-ish name, inserts the
    provided descriptor proto into it, adds that file to a default descriptor
    pool, then asks the pool for a descriptor with everything resolved.
    Unfortunately, if you have fields that are message types requiring an
    import of another file, there is no way to provide that import through
    the default MakeDescriptor() call.

    This function copies the default implementation, but instead of the
    default pool it uses a custom descriptor pool with Bonsai's Inkling Types
    already imported. It also adds the required import to the generated
    FileDescriptorProto for the schema represented by descriptor_proto.

    For reference, see:
    https://github.com/google/protobuf/python/google/protobuf/descriptor.py

    :param descriptor_proto: The descriptor proto to turn into a descriptor.
    :param package: The proto package the new message type lives in.
    :param full_name: The fully qualified name of the message type.
    :return: A descriptor corresponding to descriptor_proto.
    """

    # The descriptor may already exist... look for it first.
    pool = _message_factory.pool
    try:
        return pool.FindMessageTypeByName(full_name)
    except KeyError:
        pass

    proto_name = str(uuid.uuid4())
    proto_path = os.path.join(package, proto_name + '.proto')
    file_descriptor_proto = FileDescriptorProto()
    file_descriptor_proto.message_type.add().MergeFrom(descriptor_proto)
    file_descriptor_proto.name = proto_path
    file_descriptor_proto.package = package
    file_descriptor_proto.dependency.append('bonsai/proto/inkling_types.proto')

    # Not sure why this is needed; there's no documentation indicating how this
    # field is used. Some Google unit tests do this when adding a dependency,
    # so it's being done here too.
    file_descriptor_proto.public_dependency.append(0)

    pool.Add(file_descriptor_proto)
    result = pool.FindFileByName(proto_path)
    return result.message_types_by_name[descriptor_proto.name]
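
A hypothetical call for illustration (the message name and field layout are made up; _message_factory is the module-level factory shown in Example 30):

from google.protobuf.descriptor_pb2 import DescriptorProto, FieldDescriptorProto

dp = DescriptorProto()
dp.name = 'Reading'
f = dp.field.add()
f.name = 'value'
f.number = 1
f.type = FieldDescriptorProto.TYPE_DOUBLE
f.label = FieldDescriptorProto.LABEL_OPTIONAL

desc = _make_descriptor(dp, 'bonsai.proto', 'bonsai.proto.Reading')
Reading = _message_factory.GetPrototype(desc)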
Example 15
async def test_file_containing_symbol_response(channel):
    r1, r2 = await ServerReflectionStub(channel).ServerReflectionInfo([
        ServerReflectionRequest(file_containing_symbol=(
            DESCRIPTOR.message_types_by_name['DummyRequest'].full_name), ),
        ServerReflectionRequest(file_containing_symbol='unknown.Symbol', ),
    ])

    proto_bytes, = r1.file_descriptor_response.file_descriptor_proto
    dummy_proto = FileDescriptorProto()
    dummy_proto.ParseFromString(proto_bytes)
    assert dummy_proto.name == DESCRIPTOR.name
    assert dummy_proto.package == DESCRIPTOR.package

    assert r2 == ServerReflectionResponse(error_response=ErrorResponse(
        error_code=5,
        error_message='not found',
    ), )
Example 16
def _serialize_type_from_description(name, fields):
    fdp = FileDescriptorProto()
    fdp.name = '{}.proto'.format(name)
    mt = fdp.message_type.add()
    mt.name = name

    for idx, field in enumerate(fields):
        f = mt.field.add()
        f.name = field[0]
        f.number = idx + 1
        f.type = field[1]
        f.label = FieldDescriptorProto.LABEL_OPTIONAL
        if f.type == FieldDescriptorProto.TYPE_MESSAGE:
            f.type_name = field[2]

    data = mt.SerializeToString()
    return data
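
A usage sketch with made-up fields (each entry in `fields` is a (name, type[, type_name]) tuple, mirroring how the loop above indexes it):

point_bytes = _serialize_type_from_description(
    'Point',
    [('x', FieldDescriptorProto.TYPE_FLOAT),
     ('y', FieldDescriptorProto.TYPE_FLOAT)])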
Example 17
 def test_reconstitute_composite_schema_with_luminance(self):
     fdp = FileDescriptorProto()
     fdp.name = 'test_schemas'
     mt = fdp.message_type.add()
     mt.name = 'tests'
     f1 = mt.field.add()
     f1.name = 'a'
     f1.number = 1
     f1.type = FieldDescriptorProto.TYPE_MESSAGE
     f1.label = FieldDescriptorProto.LABEL_OPTIONAL
     f1.type_name = 'bonsai.inkling_types.proto.Luminance'
     bytes = mt.SerializeToString()
     x = MessageBuilder()
     Test = x.reconstitute_from_bytes(bytes)
     test = Test()
     test.a.width = 42
     self.assertEqual(42, test.a.width)
Example 18
def walk_binary(binr):
    if type(binr) == str:
        with open(binr, 'rb') as fd:
            binr = fd.read()

    # Search for:
    # ".proto" or ".protodevel", as part of the "name" (1) field
    cursor = 0
    while cursor < len(binr):
        cursor = binr.find(b'.proto', cursor)

        if cursor == -1:
            break
        cursor += len('.proto')
        cursor += (binr[cursor:cursor + 5] == b'devel') * 5

        # Search back for the (1, length-delimited) marker
        start = binr.rfind(b'\x0a', max(cursor - 1024, 0), cursor)

        if start > 0 and binr[start - 1] == 0x0a == (cursor - start - 1):
            start -= 1

        # Check whether length byte is coherent
        if start == -1:
            continue
        varint, end = _DecodeVarint(binr, start + 1)
        if cursor - end != varint:
            continue

        # Look just after for subsequent markers
        tags = b'\x12\x1a\x22\x2a\x32\x3a\x42\x4a\x50\x58\x62'
        if binr[cursor] not in tags:
            continue

        while cursor < len(binr) and binr[cursor] in tags:
            tags = tags[tags.index(binr[cursor]):]

            varint, end = _DecodeVarint(binr, cursor + 1)
            cursor = end + varint * (binr[cursor] & 0b111 == 2)

        # Parse descriptor
        proto = FileDescriptorProto()
        proto.ParseFromString(binr[start:cursor])

        # Convert to ascii
        yield descpb_to_proto(proto)
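
A hypothetical driver: scan a binary on disk and print whatever descpb_to_proto() recovers for each embedded descriptor:

for entry in walk_binary('some_app_binary'):
    print(entry)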
Example 19
    def reconstitute_file_from_bytes(self, file_descriptor_proto_bytes):
        """
        Reconstitutes one or more Python protobuf classes from a byte
        stream. The intended purpose of this function is to create a
        set of Protobuf Python classes from a byte stream file sent
        from another service. This way, services can define arbitrary
        data types and send schemas for those types to other services.
        Args:
            file_descriptor_proto_bytes: Serialized protocol buffer file
                                         containing one or more messages.

        Returns:
            An array containing each class contained in
            file_descriptor_proto_bytes.
        """
        file_descriptor_proto = FileDescriptorProto()
        file_descriptor_proto.ParseFromString(file_descriptor_proto_bytes)
        return self.reconstitute_file(file_descriptor_proto)
Example 20
    def disassemble(self):
        """Disassemble serialized protocol buffers file.
        """
        # Read the serialized pb file
        with open(self.input_file, 'rb') as pb_file:
            ser_pb = pb_file.read()

        fd = FileDescriptorProto()
        fd.ParseFromString(ser_pb)
        self.name = fd.name

        self._print('// Reversed by pbd (https://github.com/rsc-dev/pbd)')

        if len(fd.package) > 0:
            self._print('package {};'.format(fd.package))
            self.package = fd.package
        else:
            self._print('// Package not defined')

        self._walk(fd)
Example 21
    def extract(self):
        try:
            content = open(self.filename,'rb').read()

            # search all '.proto' strings
            protos = []
            stream = content
            while len(stream)>0:
                try:
                    r = stream.index(b'.proto')
                    for j in range(64):
                        try:
                            if decode_varint128(stream[r-j:])[0]==(j+5) and is_valid_filename(stream[r-j+1:r+6]):
                                # Walk the fields and get a probable size
                                walker = ProtobufFieldsWalker(stream[r-j-1:])
                                walker.walk()
                                probable_size = walker.get_size()

                                # The probable-size approach is not perfect,
                                # so we add a delta of 1024 bytes to be sure
                                # not to miss something =)
                                for k in range(probable_size+1024, 0, -1):
                                    try:
                                        fds = FileDescriptorProto()
                                        fds.ParseFromString(stream[r-j-1:r-j-1+k])
                                        protos.append(stream[r-j-1:r-j-1+k])
                                        print('[i] Found protofile %s (%d bytes)' % (stream[r-j+1:r+6], k))
                                        break
                                    except DecodeError:
                                        pass
                                    except UnicodeDecodeError:
                                        pass
                                break
                        except IndexError:
                            pass
                    stream = stream[r+6:]
                except ValueError:
                    break

            # Load successively each binary proto file and rebuild it from scratch
            seen = []
            for content in protos:
                try:
                    # Load the prototype
                    fds = FileDescriptorProto()
                    fds.ParseFromString(content)
                    res = FileDescriptorDisassembler(fds)
                    if len(res.desc.name)>0:
                        if res.desc.name not in seen:
                            open(res.desc.name+'.protoc','wb').write(content)
                            res.render()
                            seen.append(res.desc.name)
                except DecodeError:
                    pass
            
        except IOError:
            print('[!] Unable to read %s' % self.filename)
Example 22
def update_message_classes():
    global message_classes, descriptor_path, method_info
    factory = MessageFactory()
    # Add well-known types first
    for file_descriptor in file_descriptors.values():
        file_proto = FileDescriptorProto()
        file_proto.ParseFromString(file_descriptor.serialized_pb)
        factory.pool.Add(file_proto)
    # Then add our types
    with open(descriptor_path, 'rb') as f:
        fileset = google.protobuf.descriptor_pb2.FileDescriptorSet.FromString(f.read())
    for file_proto in fileset.file:
        factory.pool.Add(file_proto)
    message_classes = factory.GetMessages([file_proto.name for file_proto in fileset.file])

    # HACK to add nested types. Is there an API for this?
    for desc in factory.pool._descriptors.values():
        if desc.full_name not in message_classes:
            message_classes[desc.full_name] = factory.GetPrototype(desc)

    method_info = {}

    for file_proto in fileset.file:
        for service in file_proto.service:
            for method in service.method:
                k = "{}.{}".format(service.name, method.name)
                input_type = method.input_type
                output_type = method.output_type
                if input_type.startswith('.'):
                    input_type = input_type[1:]
                if output_type.startswith('.'):
                    output_type = output_type[1:]
                if input_type not in message_classes or output_type not in message_classes:
                    print("WARNING: types for method {} not found".format(k))
                input_type = message_classes[input_type]
                output_type = message_classes[output_type]

                method_info[k] = (method, input_type, output_type)
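
A hypothetical lookup against the populated method_info table (the 'Greeter.SayHello' key is a placeholder for a real service.method name):

method, input_cls, output_cls = method_info['Greeter.SayHello']
request = input_cls()    # instance of the method's resolved input type
response = output_cls()  # instance of the method's resolved output type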
Example 23
def test_record_writer(writer_path):
    """
    Record writer.
    """
    fwriter = record.RecordWriter()
    fwriter.set_size_fileseg(0)
    fwriter.set_intervaltime_fileseg(0)

    if not fwriter.open(writer_path):
        print('Failed to open record writer!')
        return
    print('+++ Begin to write +++')

    # Write 2 SimpleMessage messages
    msg = SimpleMessage()
    msg.text = "AAAAAA"

    file_desc = msg.DESCRIPTOR.file
    proto = FileDescriptorProto()
    file_desc.CopyToProto(proto)
    proto.name = file_desc.name
    desc_str = proto.SerializeToString()
    print(msg.DESCRIPTOR.full_name)
    fwriter.write_channel(
        'simplemsg_channel', msg.DESCRIPTOR.full_name, desc_str)
    fwriter.write_message('simplemsg_channel', msg, 990, False)
    fwriter.write_message('simplemsg_channel', msg.SerializeToString(), 991)

    # Write 2 Chatter messages
    msg = Chatter()
    msg.timestamp = 99999
    msg.lidar_timestamp = 100
    msg.seq = 1

    file_desc = msg.DESCRIPTOR.file
    proto = FileDescriptorProto()
    file_desc.CopyToProto(proto)
    proto.name = file_desc.name
    desc_str = proto.SerializeToString()
    print(msg.DESCRIPTOR.full_name)
    fwriter.write_channel('chatter_a', msg.DESCRIPTOR.full_name, desc_str)
    fwriter.write_message('chatter_a', msg, 992, False)
    msg.seq = 2
    fwriter.write_message("chatter_a", msg.SerializeToString(), 993)

    fwriter.close()
Example 24
    def _find_descriptor(self, desc_proto, package):
        if desc_proto is None:
            return None
        full_name = '{}.{}'.format(package, desc_proto.name)
        pool = self._message_factory.pool
        try:
            return pool.FindMessageTypeByName(full_name)
        except KeyError:
            pass

        proto_name = str(uuid.uuid4())
        proto_path = os.path.join(package, proto_name + '.proto')
        file_desc_proto = FileDescriptorProto()
        file_desc_proto.message_type.add().MergeFrom(desc_proto)
        file_desc_proto.name = proto_path
        file_desc_proto.package = package

        file_desc_proto.dependency.append('bonsai/proto/inkling_types.proto')

        file_desc_proto.public_dependency.append(0)

        pool.Add(file_desc_proto)
        result = pool.FindFileByName(proto_path)
        return result.message_types_by_name[desc_proto.name]
Example 25
def build_fds_for_msg(msg):
  """
  Given a Protobuf message `msg` (or message class), build a
  `FileDescriptorSet` that can be used with `DynamicMessageFactory` below (or
  `protobag::DynamicMsgFactory` in C++) to dynamically deserialize instances
  of `msg` at runtime (when the Protobuf-generated code for `msg` is 
  unavailable).

  See also `protobag::DynamicMsgFactory` in C++.

  We walk `msg`'s file descriptor and its dependencies to collect all data
  necessary to decode a `msg` instance.  (NB: the current search is
  over-complete and pulls in unrelated types, too.)  The algorithm below
  mirrors that in `protobag::BagIndexBuilder::Observe()`.  We must run this
  collection in Python (and not C++) because we assume we only have the
  Protobuf python-generated code available for `msg` in this code path.

  Args:
      msg (Protobuf message or class): Build a `FileDescriptorSet` based upon
        the `DESCRIPTOR` of this message.
    
  Returns:
  A `FileDescriptorSet` protobuf message instance.
  """

  from google.protobuf.descriptor_pb2 import FileDescriptorProto
  from google.protobuf.descriptor_pb2 import FileDescriptorSet

  q = [msg.DESCRIPTOR.file]
  visited = set()
  files = []
  while q:
    current = q.pop()
    if current.name not in visited:
      # Visit!
      visited.add(current.name)
      
      fd = FileDescriptorProto()
      current.CopyToProto(fd)
      files.append(fd)

      q.extend(current.dependencies)
  
  return FileDescriptorSet(file=files)
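
A minimal usage sketch (MyMessage stands in for any Protobuf-generated message class; the serialized set would travel alongside the messages it describes):

fds = build_fds_for_msg(MyMessage)
blob = fds.SerializeToString()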
Example 26
 def discover(self, request, context):
     pprint(request)
     descriptor_set = FileDescriptorSet()
     for entity in self.event_sourced_entities:
         for descriptor in entity.file_descriptors:
             descriptor_set.file.append(FileDescriptorProto.FromString(descriptor.serialized_pb))
     for dep_file in [
             'google/protobuf/empty.proto',
             'cloudstate/entity_key.proto',
             'google/protobuf/descriptor.proto',
             'google/api/annotations.proto',
             'google/api/http.proto',
     ]:
         descriptor_set.file.append(
             FileDescriptorProto.FromString(Default().FindFileByName(dep_file).serialized_pb)
         )
     spec = entity_pb2.EntitySpec(
         service_info=entity_pb2.ServiceInfo(
             service_version='0.1.0',
             service_runtime='Python ' + platform.python_version() + ' [' + platform.python_implementation() + ' ' +
                             platform.python_compiler() + ']',
             support_library_name='cloudstate-python-support',
             support_library_version='0.1.0'
         ),
         entities=[
             entity_pb2.Entity(
                 entity_type=entity.entity_type(),
                 service_name=entity.service_descriptor.full_name,
                 persistence_id=entity.persistence_id,
             )
             for entity in self.event_sourced_entities],
         proto=descriptor_set.SerializeToString()
     )
     return spec
Example 27
def nest_and_print_to_files(msg_path_to_obj, msg_to_referrers):
    msg_to_topmost = {}
    msg_to_newloc = {}
    newloc_to_msg = {}
    msg_to_imports = defaultdict(list)
    for msg, referrers in msg_to_referrers.items():
        for _, referrer, _ in referrers:
            msg_to_imports[referrer].append(msg)

    # Iterate over referred to messages/groups/enums.

    # Merge groups first:
    msg_to_referrers = OrderedDict(
        sorted(msg_to_referrers.items(), key=lambda x: -x[1][0][2]))

    mergeable = {}
    enumfield_to_enums = defaultdict(set)
    enum_to_dupfields = defaultdict(set)

    for msg, referrers in dict(msg_to_referrers).items():
        msg_pkg = get_pkg(msg)
        msg_obj = msg_path_to_obj[msg]

        # Check for duplicate enum fields in the same package:
        if not isinstance(msg_obj, DescriptorProto):
            for enum_field in msg_obj.value:
                name = msg_pkg + '.' + enum_field.name
                enumfield_to_enums[name].add(msg)

                if len(enumfield_to_enums[name]) > 1:
                    for other_enum in enumfield_to_enums[name]:
                        enum_to_dupfields[other_enum].add(name)

        first_field = referrers[0]
        field, referrer, is_group = first_field

        # Check whether message/enum has exactly one reference in this
        # package:
        if not is_group:
            in_pkg = [(field, referrer) for field, referrer, _ in referrers \
                      if (get_pkg(referrer) == msg_pkg or not msg_pkg) \
                      and msg_to_topmost.get(referrer, referrer) != msg \
                      and not msg_path_to_obj[referrer].options.map_entry \
                      and ('$' not in msg or msg.split('.')[-1].split('$')[0] == \
                                        referrer.split('.')[-1].split('$')[0])]

            if len({i for _, i in in_pkg}) != 1:
                # It doesn't. Keep for the next step
                if in_pkg:
                    mergeable[msg] = in_pkg
                continue
            else:
                field, referrer = in_pkg[0]

        else:
            assert len(referrers) == 1

        merge_and_rename(msg, referrer, msg_pkg, is_group, msg_to_referrers,
                         msg_to_topmost, msg_to_newloc, msg_to_imports,
                         msg_path_to_obj, newloc_to_msg)

    # Try to fix recursive (mutual) imports, and conflicting enum field names.
    for msg, in_pkg in mergeable.items():
        duplicate_enumfields = enum_to_dupfields.get(msg, set())

        for field, referrer in sorted(
                in_pkg,
                key=lambda x: msg_to_newloc.get(x[1], x[1]).count('.')):
            top_referrer = msg_to_topmost.get(referrer, referrer)

            if (msg in msg_to_imports[top_referrer] and \
                top_referrer in msg_to_imports[msg] and \
                msg_to_topmost.get(referrer, referrer) != msg) or \
                duplicate_enumfields:

                merge_and_rename(msg, referrer, get_pkg(msg), False,
                                 msg_to_referrers, msg_to_topmost,
                                 msg_to_newloc, msg_to_imports,
                                 msg_path_to_obj, newloc_to_msg)
                break

        for dupfield in duplicate_enumfields:
            siblings = enumfield_to_enums[dupfield]
            siblings.remove(msg)
            if len(siblings) == 1:
                enum_to_dupfields[siblings.pop()].remove(dupfield)

    for msg, msg_obj in msg_path_to_obj.items():
        # If we're a top-level message, enforce name transforms anyway
        if msg not in msg_to_topmost:
            new_name = msg_obj.name.split('$')[-1]
            new_name = new_name[0].upper() + new_name[1:]

            msg_pkg = get_pkg(msg)
            if msg_pkg:
                msg_pkg += '.'

            if new_name != msg_obj.name:
                while newloc_to_msg.get(msg_pkg + new_name, msg_pkg + new_name) in msg_path_to_obj and \
                      newloc_to_msg.get(msg_pkg + new_name, msg_pkg + new_name) not in msg_to_topmost:
                    new_name += '_'
                msg_obj.name = new_name

            fix_naming(msg_obj, msg_pkg + new_name, msg, msg, msg_to_referrers,
                       msg_to_topmost, msg_to_newloc, msg_to_imports,
                       msg_path_to_obj, newloc_to_msg)

    # Turn messages into individual files and stringify.

    path_to_file = OrderedDict()
    path_to_defines = defaultdict(list)

    for msg, msg_obj in msg_path_to_obj.items():
        if msg not in msg_to_topmost:
            path = msg.split('$')[0].replace('.', '/') + '.proto'

            if path not in path_to_file:
                path_to_file[path] = FileDescriptorProto()
                path_to_file[path].syntax = 'proto2'
                path_to_file[path].package = get_pkg(msg)
                path_to_file[path].name = path
            file_obj = path_to_file[path]

            for imported in msg_to_imports[msg]:
                import_path = imported.split('$')[0].replace('.',
                                                             '/') + '.proto'
                if import_path != path and imported not in msg_to_topmost:
                    if import_path not in file_obj.dependency:
                        file_obj.dependency.append(import_path)

            if isinstance(msg_obj, DescriptorProto):
                nested = file_obj.message_type.add()
            else:
                nested = file_obj.enum_type.add()
            nested.MergeFrom(msg_obj)

            path_to_defines[path].append(msg)
            path_to_defines[path] += [
                k for k, v in msg_to_topmost.items()
                if v == msg and '$map' not in k
            ]

    for path, file_obj in path_to_file.items():
        name, proto = descpb_to_proto(file_obj)
        header_lines = ['/**', 'Messages defined in this file:\n']
        header_lines += path_to_defines[path]
        yield name, '\n * '.join(header_lines) + '\n */\n\n' + proto
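
Since the generator yields (filename, source) pairs, a caller can materialize the reconstructed tree directly; a minimal sketch:

import os

for filename, source in nest_and_print_to_files(msg_path_to_obj,
                                                msg_to_referrers):
    dirname = os.path.dirname(filename)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with open(filename, 'w') as fd:
        fd.write(source)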
Example 28
 def __init__(self):
     self._message_factory = MessageFactory()
     inkling_file_desc = FileDescriptorProto()
     inkling_types_pb2.DESCRIPTOR.CopyToProto(inkling_file_desc)
     self._message_factory.pool.Add(inkling_file_desc)
Example 29
        r2.cmd('aaa')
        # print_info('Finished analysis')
        sections = json.loads(r2.cmd('Sj'))
        xrefs = r2.cmd('axF InternalAddGeneratedFile|cut -d" " -f2|sort -u').split()

        def get_paddr(vaddr):
            for section in sections:
                if vaddr >= section['vaddr'] and vaddr < section['vaddr'] + section['size']:
                    return vaddr - section['vaddr'] + section['paddr']
            print_error("Can't find virtual address {}", vaddr)
            return 0

        for xref in xrefs:
            disasm = json.loads(r2.cmd('pdj -2 @ ' + xref))
            if disasm[0]['type'] == 'push' and disasm[1]['type'] == 'push':
                length = disasm[0]['val']
                addr = disasm[1]['val']
                # print_info('Found protobuf of length {:6d} at addr 0x{:8x}', length, addr)
                paddr = get_paddr(addr)
                data = f[paddr:paddr+length]
                try:
                    fdp = FileDescriptorProto.FromString(data)
                    print_info('Found FileDescriptorProto of length {:6d} at addr 0x{:08x}: {}', length, paddr, fdp.name, color='green')
                    with open(os.path.join(out_dir, fdp.name.replace('/', '_')), 'wb') as outfile:
                        outfile.write(data)
                    # print(fdp)
                except google.protobuf.message.DecodeError:
                    print_error('Error while decoding data at offset 0x{:08x}, length {:6d} as FileDescriptorProto', paddr, length)
            else:
                print_warning('No push in immediate vicinity')
Example 30
import uuid
import os

from google.protobuf.descriptor_pb2 import FileDescriptorProto
from google.protobuf.descriptor_pb2 import DescriptorProto
from google.protobuf.message_factory import MessageFactory

from bonsai.proto import inkling_types_pb2

# The message factory
_message_factory = MessageFactory()

# (Relying on Python module implementation being thread-safe here...)
# Add our custom inkling types into the message factory pool so
# they are available to the message factory.
_inkling_file_descriptor = FileDescriptorProto()
inkling_types_pb2.DESCRIPTOR.CopyToProto(_inkling_file_descriptor)
_message_factory.pool.Add(_inkling_file_descriptor)


def _create_package_from_fields(descriptor_proto):
    """
    This generates a "package" name from the fields in a descriptor proto.
    :param descriptor_proto: The DescriptorProto object to analyze.
    :return: Unique "hash" of the fields and field types in descriptor_proto.
    """
    elements = (tuple((f.name, f.number, f.label, f.type, f.type_name)
                      for f in descriptor_proto.field) if descriptor_proto else
                ())

    # The hash function here is used only to generate an identifier that's
    # unique to a particular combination of fields and field types.
Example 31
 def _file_descriptor_response(self, file_descriptor):
     proto = FileDescriptorProto()
     file_descriptor.CopyToProto(proto)
     return self._pb.ServerReflectionResponse(
         file_descriptor_response=self._pb.FileDescriptorResponse(
             file_descriptor_proto=[proto.SerializeToString()], ), )
Example 32
def pxd_file():
    proto_files = [
        {
            "name": "pb/people/models/people.proto",
            "package": "pb.people.models",
            "dependency": ["pb/address/models/address.proto"],
            "messageType": [
                {
                    "name": "Person",
                    "field": [
                        {
                            "name": "name",
                            "number": 1,
                            "label": "LABEL_OPTIONAL",
                            "type": "TYPE_STRING",
                            "jsonName": "name",
                        },
                        {
                            "name": "id",
                            "number": 2,
                            "label": "LABEL_OPTIONAL",
                            "type": "TYPE_INT32",
                            "jsonName": "id",
                        },
                        {
                            "name": "email",
                            "number": 3,
                            "label": "LABEL_OPTIONAL",
                            "type": "TYPE_STRING",
                            "jsonName": "email",
                        },
                        {
                            "name": "phones",
                            "number": 4,
                            "label": "LABEL_REPEATED",
                            "type": "TYPE_MESSAGE",
                            "typeName": ".pb.people.models.Person.PhoneNumber",
                            "jsonName": "phones",
                        },
                        {
                            "name": "address",
                            "number": 5,
                            "label": "LABEL_OPTIONAL",
                            "type": "TYPE_MESSAGE",
                            "typeName": ".pb.address.models.Address",
                            "jsonName": "address",
                        },
                    ],
                    "nestedType": [
                        {
                            "name": "PhoneNumber",
                            "field": [
                                {
                                    "name": "number",
                                    "number": 1,
                                    "label": "LABEL_OPTIONAL",
                                    "type": "TYPE_STRING",
                                    "jsonName": "number",
                                },
                                {
                                    "name": "type",
                                    "number": 2,
                                    "label": "LABEL_OPTIONAL",
                                    "type": "TYPE_ENUM",
                                    "typeName": ".pb.people.models.Person.PhoneType",
                                    "jsonName": "type",
                                },
                            ],
                        }
                    ],
                    "enumType": [
                        {
                            "name": "PhoneType",
                            "value": [
                                {"name": "MOBILE", "number": 0},
                                {"name": "HOME", "number": 1},
                                {"name": "WORK", "number": 2},
                            ],
                        }
                    ],
                }
            ],
            "syntax": "proto3",
        },
        {
            "name": "pb/address/models/address.proto",
            "package": "pb.address.models",
            "messageType": [
                {
                    "name": "Address",
                    "field": [
                        {
                            "name": "street",
                            "number": 1,
                            "label": "LABEL_OPTIONAL",
                            "type": "TYPE_STRING",
                            "jsonName": "street",
                        }
                    ],
                }
            ],
            "syntax": "proto3",
        },
    ]
    parsed_files = ProtoFile.from_file_descriptor_protos(
        [json_format.ParseDict(x, FileDescriptorProto()) for x in proto_files],
        {"pb/people/models/people.proto"},
        "",
    )
    return next(x for x in parsed_files if x.proto_filename == "pb/people/models/people.proto")