Code Example #1
    sys.exit(0)

entries = []
for i in range(num_chunks):
    buf = f.read(24)
    entries.append(struct.unpack('<QQQ', buf))

if entries[-1][1] != dir_pos:
    entries.append((0, dir_pos, 0))

#print entries

for entry, next_entry in zip(entries, entries[1:]):
    start_instr, start_pos, num_entries = entry
    next_pos = next_entry[1]
    f.seek(start_pos)
    zsize = next_pos - start_pos
    #print start_pos, next_pos, zsize,
    zdata = f.read(zsize)
    data = zlib.decompress(zdata, 15, chunk_size)
    #print len(data)
    i = 0
    while i < len(data):
        entry_size = struct.unpack('<I', data[i:i + 4])[0]
        i += 4
        entry_data = data[i:i + entry_size]
        message = plog_pb2.LogEntry()
        message.ParseFromString(entry_data)
        print(MessageToJson(message))
        i += entry_size
Code Example #2
def main(_):
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")
    gpus = [
        x.name for x in device_lib.list_local_devices()
        if x.device_type == "GPU"
    ]
    if FLAGS.num_gpus > len(gpus):
        raise ValueError("Your machine has only %d gpus "
                         "which is less than the requested --num_gpus=%d." %
                         (len(gpus), FLAGS.num_gpus))

    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data

    config = get_config()
    eval_config = get_config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)

        with tf.name_scope("Train"):
            train_input = PTBInput(config=config,
                                   data=train_data,
                                   name="TrainInput")
            with tf.variable_scope("Model",
                                   reuse=None,
                                   initializer=initializer):
                m = PTBModel(is_training=True,
                             config=config,
                             input_=train_input)
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Learning Rate", m.lr)

        with tf.name_scope("Valid"):
            valid_input = PTBInput(config=config,
                                   data=valid_data,
                                   name="ValidInput")
            with tf.variable_scope("Model",
                                   reuse=True,
                                   initializer=initializer):
                mvalid = PTBModel(is_training=False,
                                  config=config,
                                  input_=valid_input)
            tf.summary.scalar("Validation Loss", mvalid.cost)

        with tf.name_scope("Test"):
            test_input = PTBInput(config=eval_config,
                                  data=test_data,
                                  name="TestInput")
            with tf.variable_scope("Model",
                                   reuse=True,
                                   initializer=initializer):
                mtest = PTBModel(is_training=False,
                                 config=eval_config,
                                 input_=test_input)

        models = {"Train": m, "Valid": mvalid, "Test": mtest}
        for name, model in models.items():
            model.export_ops(name)
        metagraph = tf.train.export_meta_graph()
        temp_meta = MessageToJson(metagraph.graph_def)
        with open('kernelLogs/metagraph.json', 'w') as outfile:
            json.dump(temp_meta, outfile)
        #sys.exit()
        if tf.__version__ < "1.1.0" and FLAGS.num_gpus > 1:
            raise ValueError(
                "num_gpus > 1 is not supported for TensorFlow versions "
                "below 1.1.0")


        # soft_placement = True
        soft_placement = False
        if FLAGS.num_gpus > 1:
            soft_placement = True
            util.auto_parallel(metagraph, m)
        #added by ubaid
        all_ops = tf.get_default_graph().get_operations()
        adj_list_graph = {}
        for op in all_ops:
            adj_list_graph[op.name] = set([inp.name for inp in op.inputs])
        adj_list_graph_notensors = {}
        for op in all_ops:
            adj_list_graph_notensors[op.name] = set(
                [inp.name.split(":")[0] for inp in op.inputs])

        adj_list_graph_notensors = {
            op_name: list(op_deps)
            for op_name, op_deps in adj_list_graph_notensors.items()
        }
        adj_list_graph = {
            op_name: list(op_deps)
            for op_name, op_deps in adj_list_graph.items()
        }

        with open('kernelLogs/org_graph_rnnlm_ptb_%s.json' % (FLAGS.model),
                  'w') as outfile:
            json.dump(adj_list_graph, outfile)
        with open(
                'kernelLogs/org_graph_notensors_rnnlm_ptb_%s.json' %
            (FLAGS.model), 'w') as outfile:
            json.dump(adj_list_graph_notensors, outfile)
        #sys.exit()
        #####

    with tf.Graph().as_default():
        tf.train.import_meta_graph(metagraph)
        for model in models.values():
            model.import_ops()
        sv = tf.train.Supervisor(logdir=FLAGS.save_path)

        #config_proto = tf.ConfigProto(allow_soft_placement=soft_placement)
        # added by xilenteyex
        config_proto = tf.ConfigProto(
            allow_soft_placement=soft_placement,
            graph_options=tf.GraphOptions(build_cost_model=1))
        config_proto.intra_op_parallelism_threads = 1
        config_proto.inter_op_parallelism_threads = 1
        config_proto.graph_options.optimizer_options.opt_level = -1
        config_proto.graph_options.rewrite_options.constant_folding = (
            rewriter_config_pb2.RewriterConfig.OFF)
        config_proto.graph_options.rewrite_options.arithmetic_optimization = (
            rewriter_config_pb2.RewriterConfig.OFF)
        config_proto.graph_options.rewrite_options.dependency_optimization = (
            rewriter_config_pb2.RewriterConfig.OFF)
        config_proto.graph_options.rewrite_options.layout_optimizer = (
            rewriter_config_pb2.RewriterConfig.OFF)
        ######

        with sv.managed_session(config=config_proto) as session:
            for i in range(config.max_max_epoch):
                lr_decay = config.lr_decay**max(i + 1 - config.max_epoch, 0.0)
                m.assign_lr(session, config.learning_rate * lr_decay)

                print("Epoch: %d Learning rate: %.3f" %
                      (i + 1, session.run(m.lr)))
                train_perplexity = run_epoch(session,
                                             m,
                                             eval_op=m.train_op,
                                             verbose=True,
                                             epoch_no=i)
                print("Epoch: %d Train Perplexity: %.3f" %
                      (i + 1, train_perplexity))
                valid_perplexity = run_epoch(session, mvalid)
                print("Epoch: %d Valid Perplexity: %.3f" %
                      (i + 1, valid_perplexity))

            test_perplexity = run_epoch(session, mtest)
            print("Test Perplexity: %.3f" % test_perplexity)

            if FLAGS.save_path:
                print("Saving model to %s." % FLAGS.save_path)
                sv.saver.save(session,
                              FLAGS.save_path,
                              global_step=sv.global_step)
Code Example #3
 def onRpcNoticeRtn(rpcNoticeRtn):
     logger.info("Received notification message %s", MessageToJson(rpcNoticeRtn))
Code Example #4
File: helpers.py Project: allogn/fair-taxi-cruising
def load_tb_summary_as_df(experiment_name,
                          plotting_param,
                          solver_name,
                          solver_key_params=[],
                          smoothing=0):
    """
    :param plotting_param: a str that must be contained in the name of the loaded param


    Finds and loads as Pandas dataframe results of the solver with the name and key params.
    Key params are a list of < key , value > pairs that uniqly indicate the solver instance.
    If multiple runs are found for the key_params, then they all are loaded, averaged, and std is added.
    """
    data_dir = os.path.join(os.environ['ALLDATA_PATH'], "generated",
                            experiment_name)
    assert len(
        os.listdir(data_dir)
    ) == 1, "There must be exactly one generated network per experiment. Other versions not supported"
    data_dir = os.path.join(data_dir, os.listdir(data_dir)[0])

    # traverse through all dirs, and check for each dir if it corresponds to the solver name and params
    dirs = [d for d in os.listdir(data_dir) if not os.path.isfile(os.path.join(data_dir, d)) \
                    and is_solver_correct(d, solver_name, solver_key_params)]
    if len(dirs) == 0:
        raise Exception("There is no such solver name with such params")
    # there might be several runs for the same params, and also _stats and not-stats

    list_for_df = []
    # There might be several runs for the same solver
    for run_dir in dirs:
        run_id = int(run_dir[-1])  # last digit is the run_id
        path = os.path.join(data_dir, run_dir)
        files = list(
            os.path.join(path, f) for f in os.listdir(path)
            if os.path.isfile(os.path.join(path, f)))
        if len(files) != 1:
            files = [p for p in files
                     if p.find("events") > -1]  # if it is cA2C
            if len(files) != 1:
                raise Exception(
                    "There is more than one run for the same footprint; "
                    "check that get_footprint_params of the solver has all the variable params, "
                    "or something else is wrong.")
        first_file = files[0]
        for summary in tf.train.summary_iterator(first_file):
            step = summary.step
            serialized = json.loads(MessageToJson(summary.summary))
            try:
                for d in serialized['value']:
                    if (d['tag'].find(plotting_param) > -1) and (
                            d['tag'].find(plotting_param + "_") == -1):
                        list_for_df.append({
                            "step": step,
                            "val": d['simpleValue'],
                            "run_id": run_id
                        })
                        # this might raise KeyError if the requested param is a histogram (an array); then there is no simpleValue
            except KeyError:
                pass
    df = pd.DataFrame(list_for_df)
    if df.duplicated().any():
        raise Exception(
            "There are duplicated values, check uniquness of the plotting parameter"
        )
    if len(df) == 0:
        raise Exception("No results, empty dataframe")

    # first do smoothing, then averaging!
    win_size = int(0.1 * len(df))
    for run_id in df["run_id"].unique():
        s = df[df["run_id"] == run_id].rolling(win_size).sum()['val']
        df.loc[df["run_id"] == run_id, 'val'] = s
    df = df[df['val'].notnull()]

    df = df.groupby(by="step", as_index=False).agg(['mean', 'std'])
    df.reset_index(inplace=True)
    return df
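
A minimal usage sketch of the helper above; the experiment name, solver name, and key params are hypothetical placeholders, and ALLDATA_PATH must point at the generated data:

os.environ.setdefault('ALLDATA_PATH', '/data')      # hypothetical data root
df = load_tb_summary_as_df(
    experiment_name='exp_small_grid',               # hypothetical experiment
    plotting_param='reward',                        # substring matched against TB summary tags
    solver_name='cA2C',                             # hypothetical solver name
    solver_key_params=[('lr', '0.001')],            # <key, value> pairs identifying the run
    smoothing=0)
# after the groupby/agg, df has a 'step' column plus ('val', 'mean') and ('val', 'std') columns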
Code Example #5
sess = tf.Session(config=config_proto)
sess.run(tf.global_variables_initializer())

X_, Y_ = sess.run([X, Y])
X_Y_ = X_ + Y_
_X_Y = _X + _Y

tot_time = 0
for i in range(10):
    print(i)
    run_metadata = tf.RunMetadata()
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE,
                                output_partition_graphs=True)
    st = time.time()
    sess.run(Z1 + Z2 + Z3, {_i: i_
                            for _i, i_ in zip(_X_Y, X_Y_)},
             options=run_options,
             run_metadata=run_metadata)
    tot_time += time.time() - st

    if i >= 2:
        jsonObj = MessageToJson(run_metadata)
        with open('%s/metadata_%d.json' % (logPath, i), 'w') as outfile:
            json.dump(jsonObj, outfile)

        trace = timeline.Timeline(step_stats=run_metadata.step_stats)
        with open('%s/timeline_%d.ctf.json' % (logPath, i), 'w') as trace_file:
            trace_file.write(trace.generate_chrome_trace_format())
print('total time taken : ', tot_time)
Code Example #6
File: adapter_agent.py Project: wanman/voltha
 def _gen_tx_proxy_address_topic(self, proxy_address):
     """Generate unique topic name specific to this proxy address for tx"""
     topic = 'tx:' + MessageToJson(proxy_address)
     return topic
Code Example #7
def unserialize(obj):
    return json.loads(MessageToJson(obj, including_default_value_fields=True))
Code Example #8
def pb_to_yaml(message):
    message_dict = MessageToJson(message)
    return dump_to_yaml_str(message_dict)
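
An equivalent standalone sketch of the same idea, using MessageToDict plus PyYAML instead of the project's dump_to_yaml_str helper (both substitutions are assumptions, not the original API):

import yaml
from google.protobuf.json_format import MessageToDict
from google.protobuf.struct_pb2 import Struct, Value

message = Struct(fields={"greeting": Value(string_value="hello")})
print(yaml.safe_dump(MessageToDict(message)))  # prints "greeting: hello"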
Code Example #9
def convert(source_file,
            target_file,
            trim_unused_by_output="",
            verbose=False,
            compress_f16=False):
    """
    Converts a TensorFlow model into a Barracuda model.
    :param source_file: The TensorFlow Model
    :param target_file: The name of the file the converted model will be saved to
    :param trim_unused_by_output: A regexp matching the output nodes to keep in the model. All other unconnected nodes will be removed.
    :param verbose: If True, will display debug messages
    :param compress_f16: If true, the float values will be converted to f16
    :return:
    """
    if isinstance(verbose, bool):
        args = Struct()
        args.verbose = verbose
        args.print_layers = verbose
        args.print_source_json = verbose
        args.print_barracuda_json = verbose
        args.print_layer_links = verbose
        args.print_patterns = verbose
        args.print_tensors = verbose
    else:
        args = verbose

    # Load Tensorflow model
    print("Converting %s to %s" % (source_file, target_file))
    i_model = tf.GraphDef()
    with open(source_file, 'rb') as f:
        i_model.ParseFromString(f.read())

    if args.verbose:
        print('OP_TYPES:', {layer.op for layer in i_model.node})

    if args.print_source_json or args.verbose:
        for layer in i_model.node:
            if not layer.op == 'Const':
                print('MODEL:', MessageToJson(layer) + ",")

    # Convert
    o_model = barracuda.Model()
    o_model.layers, o_input_shapes, o_model.tensors, o_model.memories = \
        process_model(i_model, args)

    # Cleanup unconnected Identities (they might linger after processing complex node patterns like LSTM)
    def cleanup_layers(layers):
        all_layers = {l.name for l in layers}
        all_inputs = {i for l in layers for i in l.inputs}

        def is_unconnected_identity(layer):
            if layer.class_name == 'Activation' and layer.activation == 0:  # Identity
                assert (len(layer.inputs) == 1)
                if layer.inputs[
                        0] not in all_layers and layer.name not in all_inputs:
                    return True
            return False

        return [l for l in layers if not is_unconnected_identity(l)]

    o_model.layers = cleanup_layers(o_model.layers)

    all_inputs = {i for l in o_model.layers for i in l.inputs}
    embedded_tensors = {t.name for l in o_model.layers for t in l.tensors}

    # Find global tensors
    def dims_to_barracuda_shape(dims):
        shape = list(dims)
        while len(shape) < 4:
            shape = [1] + shape
        return shape

    o_model.globals = [
        t for t in o_model.tensors
        if t not in all_inputs and t not in embedded_tensors
    ]
    #for x in global_tensors:
    #    shape = dims_to_barracuda_shape(get_tensor_dims(o_model.tensors[x]))
    #    o_globals += [Struct(
    #        name = x,
    #        shape = shape,
    #        data = np.reshape(get_tensor_data(o_model.tensors[x]), shape).astype(np.float32))]

    # Trim
    if trim_unused_by_output:
        o_model.layers = barracuda.trim(o_model.layers, trim_unused_by_output,
                                        args.verbose)

    # Create load layers for constants
    const_tensors = [i for i in all_inputs if i in o_model.tensors]
    const_tensors += o_model.globals
    for x in const_tensors:
        shape = dims_to_barracuda_shape(get_tensor_dims(o_model.tensors[x]))

        o_l = Struct(
            type=255,  # Load
            class_name="Const",
            name=x,
            pads=[0, 0, 0, 0],
            strides=[],
            pool_size=[],
            axis=-1,
            alpha=1,
            beta=0,
            activation=0,
            inputs=[],
            tensors=[
                Struct(name=x,
                       shape=shape,
                       data=np.reshape(get_tensor_data(o_model.tensors[x]),
                                       shape).astype(np.float32))
            ])
        o_model.layers.insert(0, o_l)

    # Find model inputs & outputs
    all_layers = {l.name for l in o_model.layers}
    # global inputs => are inputs that are NOT connected to any layer in the network
    # global outputs => are outputs that are NOT feeding any layer in the network OR are coming from Identity layers
    o_model.inputs = {
        i: o_input_shapes[i]
        for l in o_model.layers for i in l.inputs
        if i not in all_layers and i not in o_model.memories
    }

    def is_output_layer(layer):
        if layer.class_name == 'Const':  # Constants never count as global output even when unconnected
            return False
        if layer.name not in all_inputs:  # this layer is not inputing to any other layer
            return True
        if layer.class_name == 'Activation' and layer.activation == 0:  # Identity marks global output
            return True
        return False

    o_model.outputs = [l.name for l in o_model.layers if is_output_layer(l)]

    # Compress
    if compress_f16:
        o_model = barracuda.compress(o_model)

    # Sort model so that layer inputs are always ready upfront
    o_model.layers = barracuda.sort(o_model.layers, o_model.inputs,
                                    o_model.memories, args.verbose)

    # Summary
    barracuda.summary(o_model,
                      print_layer_links=args.print_layer_links or args.verbose,
                      print_barracuda_json=args.print_barracuda_json
                      or args.verbose,
                      print_tensors=args.print_tensors or args.verbose)

    # Write to file
    barracuda.write(o_model, target_file)
    print('DONE: wrote', target_file, 'file.')
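
A minimal usage sketch of convert(); the file names and the output-node pattern are hypothetical, and the input is expected to be a frozen TensorFlow GraphDef:

# hypothetical paths: frozen TensorFlow graph in, Barracuda model out
convert('frozen_graph_def.pb', 'model.nn', trim_unused_by_output='action', verbose=False)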
Code Example #10
 def probeValue(self, path):
     any_msg = any_pb2.Any()
     any_msg.Pack(self.model)
     typedValue = gnmi_pb2.TypedValue(any_val=any_msg)
     logger.debug(MessageToJson(self.model))
     return gnmi_pb2.Update(path=path, val=typedValue) 
Code Example #11
 def to_json(self):
     return MessageToJson(self._data)
Code Example #12
    def CreateVolume(self, request, context):
        Utils.validate_param_exists(request, 'name')
        name = request.name
        capacity = self._parse_required_capacity(request.capacity_range)
        parameters = request.parameters

        #UNUSED - secrets = request.secrets
        #UNUSED - volume_content_source = request.volume_content_source
        #UNUSED - accessibility_requirements = request.accessibility_requirements

        reqJson = MessageToJson(request)
        self.logger.debug('create volume request: {}'.format(reqJson))
        reqDict = MessageToDict(request)
        capabilities = reqDict['volumeCapabilities']

        is_file_system = False
        is_block_device = False

        csi_metadata = {'csi_name': name, 'capabilities': capabilities}

        for capability in capabilities:
            if 'mount' in capability:
                is_file_system = True
                csi_metadata['fsType'] = capability['mount']['fsType']
            else:
                is_block_device = True
                csi_metadata['block'] = True

            access_mode = capability['accessMode']['mode']
            if (Consts.AccessMode.fromCsiString(access_mode)
                    not in Consts.AccessMode.allowed_access_modes()):
                self.logger.warning(
                    'Requested mode {} is not enforced by NVMesh Storage backend'
                    .format(access_mode))

        if is_file_system and is_block_device:
            raise DriverError(
                StatusCode.INVALID_ARGUMENT,
                'Error: Contradicting capabilities both Block Volume and FileSystem Volume were requested for volume {}. request: {}'
                .format(name, reqJson))

        nvmesh_vol_name = Utils.volume_id_to_nvmesh_name(name)
        nvmesh_params = {}

        self.logger.debug('create volume parameters: {}'.format(parameters))

        if 'vpg' in parameters:
            self.logger.debug('Creating Volume from VPG {}'.format(
                parameters['vpg']))
            nvmesh_params['VPG'] = parameters['vpg']

            # This is a workaround since the nvmesh create volume api expects a 'RAIDLevel'
            # but if 'VPG' is present 'RAIDLevel' field will be ignored
            # and the RAIDLevel will be fetched from the VPG.
            nvmesh_params['RAIDLevel'] = RAIDLevels.CONCATENATED
        else:
            self.logger.debug('Creating without VPG')
            for param in parameters:
                nvmesh_params[param] = parameters[param]

            self._handle_non_vpg_params(nvmesh_params)

        self.logger.debug('nvmesh_params = {}'.format(nvmesh_params))

        volume = NVMeshVolume(name=nvmesh_vol_name,
                              capacity=capacity,
                              csi_metadata=csi_metadata,
                              **nvmesh_params)

        self.logger.debug('Creating volume: {}'.format(str(volume)))
        err, data = VolumeAPI().save([volume])

        if err:
            raise DriverError(
                StatusCode.RESOURCE_EXHAUSTED,
                'Error: {} Details: {} Volume Requested: {}'.format(
                    err, data, str(volume)))
        elif not type(data) == list or not data[0]['success']:
            if 'Name already Exists' in data[0]['error']:
                existing_capacity = self._get_nvmesh_volume_capacity(
                    nvmesh_vol_name)
                if capacity == existing_capacity:
                    # Idempotency - same Name same Capacity - return success
                    pass
                else:
                    raise DriverError(
                        StatusCode.ALREADY_EXISTS,
                        'Error: {} Details: {}'.format(err, data))
            else:
                raise DriverError(StatusCode.RESOURCE_EXHAUSTED,
                                  'Error: {} Details: {}'.format(err, data))

        err, details = self._wait_for_volume_status(
            volume._id, NVMeshConsts.VolumeStatuses.ONLINE)

        if err:
            if err == 'Timed out Waiting for Volume to be Online':
                raise DriverError(StatusCode.FAILED_PRECONDITION,
                                  'Error: {} Details: {}'.format(err, details))
            else:
                raise DriverError(StatusCode.INVALID_ARGUMENT, err)
        else:
            self.logger.debug(details)

        # we return the nvmesh_vol_name that we created to the CO
        # all subsequent requests for this volume will have volume_id of the nvmesh volume name
        csiVolume = Volume(volume_id=nvmesh_vol_name, capacity_bytes=capacity)
        return CreateVolumeResponse(volume=csiVolume)
Code Example #13
File: KVCommand_example.py Project: x5g/proto-test
kvMessage = KVCommand_pb2.KVMessage()
kvMessage.id = 2020
kvMessage.len = 1010
kvResponse = kvMessage.response
kvResponse.status = 2010
kvResponse.message = '1020'
kvRequest = kvMessage.request
kvRequest.type = KVCommand_pb2.KVRequest.ITEM
kvRequest.item.command = 'command'
kvRequest.login.username = '******'
kvRequest.login.password = '******'

# Save in binary form
f = open(PB_FILE_PATH, "wb")
f.write(kvMessage.SerializeToString())
f.close()

# Read kvMessage back
kvMessage = KVCommand_pb2.KVMessage()
f = open(PB_FILE_PATH, "rb")
kvMessage.ParseFromString(f.read())
f.close()
from google.protobuf.json_format import MessageToJson  # the key import
json = MessageToJson(kvMessage)
print(json)

# Save the JSON
f = open(JSON_FILE_PATH, "w")
f.write(json)
f.close()
Code Example #14
File: test_request.py Project: jina-ai/jina
def test_init(req):
    assert DataRequest(request=None)
    assert DataRequest(request=req)
    assert DataRequest(request=MessageToDict(req))
    assert DataRequest(request=MessageToJson(req))
Code Example #15
messageData = MessageData_pb2.MessageData()
messageData.content = str.encode(str(random.randint(0, 65535)))
sysHeader = messageData.sysHeader
sysHeader.bizSeqNo = str(random.random())
sysHeader.sysSeqNo = str(random.random())
sysHeader.ip = str(random.random())
sysHeader.userId = str(random.random())
sysHeader.version = random.randint(0, 65535)
sysHeader.channelId = str(random.random())
sysHeader.origSysId = str(random.random())
sysHeader.prevSysId = str(random.random())

# Save in binary form
f = open(PB_FILE_PATH, "wb")
f.write(messageData.SerializeToString())
f.close()

# Read messageData back
messageData = MessageData_pb2.MessageData()
f = open(PB_FILE_PATH, "rb")
messageData.ParseFromString(f.read())
f.close()
from google.protobuf.json_format import MessageToJson  # the key import
json = MessageToJson(messageData)
print(json)

# Save the JSON
f = open(JSON_FILE_PATH, "w")
f.write(json)
f.close()
Code Example #16
File: entity.py Project: singh-b/feast
 def __str__(self):
     return str(MessageToJson(self.to_proto()))
Code Example #17
File: transcript.py Project: rafaelbusetti/Repo-2019
    #speech_contexts=[speech.types.SpeechContext(phrases=['um', 'dois','três'])],
    enable_speaker_diarization=True,
    diarization_speaker_count=2,
    audio_channel_count=1,
    profanity_filter=True,
    enable_separate_recognition_per_channel=False)

audio = speech.types.RecognitionAudio(uri="gs://temp/audio.wav")

operation = client.long_running_recognize(config, audio)

print('Waiting for operation to complete...')
response = operation.result(timeout=100000000)

from google.protobuf.json_format import MessageToJson
serialized = MessageToJson(response)

with open("/home/rubens/Documents/response2.txt", "w") as text_file:
    text_file.write(serialized)

import json
with open('/home/rubens/Documents/data4.json', 'w') as f:
    json.dump(serialized, f)

texto = []

for i in range(0, len(response.results)):
    print(response.results[i].alternatives[0].words)
    texto.append(response.results[i].alternatives[0].transcript)

texto
Code Example #18
File: message2_example.py Project: x5g/proto-test
import message2_pb2

PB_FILE_PATH = 'message2.pb'
JSON_FILE_PATH = 'message2.json'

command = message2_pb2.Command()
command.type = message2_pb2.Command.START_TEST
command.name = 'Test Case 1'
command.data = 'Test Data'

# Save in binary form
f = open(PB_FILE_PATH, "wb")
f.write(command.SerializeToString())
f.close()

# Read command back
command = message2_pb2.Command()
f = open(PB_FILE_PATH, "rb")
command.ParseFromString(f.read())
f.close()
from google.protobuf.json_format import MessageToJson  # the key import
json = MessageToJson(command)
print(json)

# Save the JSON
f = open(JSON_FILE_PATH, "w")
f.write(json)
f.close()
Code Example #19
    def _make_entry_resource(self,
                             text=None,
                             info=None,
                             message=None,
                             labels=None,
                             insert_id=None,
                             severity=None,
                             http_request=None):
        """Return a log entry resource of the appropriate type.

        Helper for :meth:`log_text`, :meth:`log_struct`, and :meth:`log_proto`.

        Only one of ``text``, ``info``, or ``message`` should be passed.

        :type text: string or :class:`NoneType`
        :param text: text payload

        :type info: dict or :class:`NoneType`
        :param info: struct payload

        :type message: Protobuf message or :class:`NoneType`
        :param message: protobuf payload

        :type labels: dict or :class:`NoneType`
        :param labels: labels passed in to calling method.

        :type insert_id: string or :class:`NoneType`
        :param insert_id: (optional) unique ID for log entry.

        :type severity: string or :class:`NoneType`
        :param severity: (optional) severity of event being logged.

        :type http_request: dict or :class:`NoneType`
        :param http_request: (optional) info about HTTP request associated with
                             the entry

        :rtype: dict
        :returns: The JSON resource created.
        """
        resource = {
            'logName': self.full_name,
            'resource': {
                'type': 'global'
            },
        }

        if text is not None:
            resource['textPayload'] = text

        if info is not None:
            resource['jsonPayload'] = info

        if message is not None:
            as_json_str = MessageToJson(message)
            as_json = json.loads(as_json_str)
            resource['protoPayload'] = as_json

        if labels is None:
            labels = self.labels

        if labels is not None:
            resource['labels'] = labels

        if insert_id is not None:
            resource['insertId'] = insert_id

        if severity is not None:
            resource['severity'] = severity

        if http_request is not None:
            resource['httpRequest'] = http_request

        return resource
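
For reference, a minimal standalone sketch of the protoPayload conversion used above, with a well-known Struct message (the log name is a hypothetical placeholder, independent of this class):

import json
from google.protobuf.json_format import MessageToJson
from google.protobuf.struct_pb2 import Struct, Value

message = Struct(fields={"foo": Value(bool_value=True)})
as_json = json.loads(MessageToJson(message))        # -> {'foo': True}
resource = {
    'logName': 'projects/my-project/logs/my-log',   # hypothetical log name
    'resource': {'type': 'global'},
    'protoPayload': as_json,
}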
Code Example #20
            self.chunk_data = zlib.decompress(self.f.read(zchunk_size), 15, self.chunk_gsize)
            self.chunk_size = len(self.chunk_data)
            self.chunk_data_idx = 0

        # parse message - we're using a fresh message
        # using MergeFromString() is slightly faster than using ParseFromString()
        msg_size, = struct.unpack_from('<I', self.chunk_data, self.chunk_data_idx)
        msg = plog_pb2.LogEntry()
        msg_start = self.chunk_data_idx + 4
        msg_end = msg_start + msg_size
        msg.MergeFromString(self.chunk_data[msg_start:msg_end])

        # update state
        self.chunk_data_idx = msg_end

        if self.chunk_data_idx >= self.chunk_size:
            self.chunk_idx += 1
            self.chunk_size = 0
            self.chunk_data = None
            self.chunk_data_idx = 0

        return msg

if __name__ == "__main__":
    print('[')
    with PLogReader(sys.argv[1]) as plr:
        for i, m in enumerate(plr):
            if i > 0: print(',')
            print(MessageToJson(m), end='')
    print('\n]')
Code Example #21
import ProtoMsg_pb2

PB_FILE_PATH = 'ProtoMsg.pb'
JSON_FILE_PATH = 'ProtoMsg.json'

protoMsg = ProtoMsg_pb2.Message()
protoMsg.content = 'Hello World!'


# Save in binary form
f = open(PB_FILE_PATH, "wb")
f.write(protoMsg.SerializeToString())
f.close()

# Read protoMsg back
protoMsg = ProtoMsg_pb2.Message()
f = open(PB_FILE_PATH, "rb")
protoMsg.ParseFromString(f.read())
f.close()
from google.protobuf.json_format import MessageToJson   # the key import
json = MessageToJson(protoMsg)
print(json)

# Save the JSON
f = open(JSON_FILE_PATH, "w")
f.write(json)
f.close()

Code Example #22
import sys


@pytest.mark.skipif(
    sys.version_info < (3, 8, 0),
    reason='somehow this does not work on Github workflow with Py3.7, '
    'but Py 3.8 is fine, local Py3.7 is fine')
def test_on_bad_iterator():
    # this should not hang the server, as request_generator's error is handled on the client side
    f = Flow().add()
    with f:
        f.index([1, 2, 3])


@pytest.mark.parametrize('builder', [
    lambda x: x.SerializeToString(), lambda x: MessageToJson(x),
    lambda x: MessageToDict(x), lambda x: Document(x)
])
def test_data_type_builder_doc(builder):
    a = DocumentProto()
    a.id = 'a236cbb0eda62d58'
    d, t = _build_doc(builder(a), DataInputType.DOCUMENT)
    assert d.id == a.id
    assert t == DataInputType.DOCUMENT


def test_data_type_builder_doc_bad():
    a = DocumentProto()
    a.id = 'a236cbb0eda62d58'
    with pytest.raises(BadDocType):
        _build_doc(b'BREAKIT!' + a.SerializeToString(), DataInputType.DOCUMENT)
Code Example #23
import qwe_pb2

PB_FILE_PATH = 'qwe.pb'
JSON_FILE_PATH = 'qwe.json'

qwe = qwe_pb2.Qwe()
qwe.age = 24
qwe.name = 'ivan'


# Save in binary form
f = open(PB_FILE_PATH, "wb")
f.write(qwe.SerializeToString())
f.close()

# Read qwe back
qwe = qwe_pb2.Qwe()
f = open(PB_FILE_PATH, "rb")
qwe.ParseFromString(f.read())
f.close()
from google.protobuf.json_format import MessageToJson   # the key import
json = MessageToJson(qwe)
print(json)

# Save the JSON
f = open(JSON_FILE_PATH, "w")
f.write(json)
f.close()

Code Example #24
def proto_to_json():
    message = personInfo_pb2.PersonInfo()
    message.name = "adf"
    json_string = MessageToJson(message)
    print(json_string)
Code Example #25
File: bridgeDomain.py Project: wxxxxxz/vpp-agent
 def validate(self):
     bridgeDomain = BridgeDomain()
     Parse(json.dumps(self.values), bridgeDomain)
     return MessageToJson(bridgeDomain, preserving_proto_field_name=True, indent=None)
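
A small standalone illustration of what preserving_proto_field_name changes, using a descriptor proto only because its field names contain underscores (this is not part of the vpp-agent code):

from google.protobuf import descriptor_pb2
from google.protobuf.json_format import MessageToJson

fd = descriptor_pb2.FileDescriptorProto(name='x.proto')
fd.message_type.add(name='Foo')
print(MessageToJson(fd))                                    # field is rendered as "messageType"
print(MessageToJson(fd, preserving_proto_field_name=True))  # field stays "message_type"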
Code Example #26
    def test_commit_w_bound_client(self):
        import json
        import datetime
        from google.protobuf.json_format import MessageToJson
        from google.protobuf.struct_pb2 import Struct
        from google.protobuf.struct_pb2 import Value
        from google.cloud._helpers import _datetime_to_rfc3339
        from google.cloud.logging.entries import _GLOBAL_RESOURCE

        TEXT = "This is the entry text"
        STRUCT = {"message": TEXT, "weather": "partly cloudy"}
        message = Struct(fields={"foo": Value(bool_value=True)})
        IID1 = "IID1"
        IID2 = "IID2"
        IID3 = "IID3"
        TIMESTAMP1 = datetime.datetime(2016, 12, 31, 0, 0, 1, 999999)
        TIMESTAMP2 = datetime.datetime(2016, 12, 31, 0, 0, 2, 999999)
        TIMESTAMP3 = datetime.datetime(2016, 12, 31, 0, 0, 3, 999999)
        TRACE1 = "12345678-1234-5678-1234-567812345678"
        TRACE2 = "12345678-1234-5678-1234-567812345679"
        TRACE3 = "12345678-1234-5678-1234-567812345670"
        SPANID1 = "000000000000004a"
        SPANID2 = "000000000000004b"
        SPANID3 = "000000000000004c"
        ENTRIES = [
            {
                "textPayload": TEXT,
                "insertId": IID1,
                "timestamp": _datetime_to_rfc3339(TIMESTAMP1),
                "resource": _GLOBAL_RESOURCE._to_dict(),
                "trace": TRACE1,
                "spanId": SPANID1,
                "traceSampled": True,
            },
            {
                "jsonPayload": STRUCT,
                "insertId": IID2,
                "timestamp": _datetime_to_rfc3339(TIMESTAMP2),
                "resource": _GLOBAL_RESOURCE._to_dict(),
                "trace": TRACE2,
                "spanId": SPANID2,
                "traceSampled": False,
            },
            {
                "protoPayload": json.loads(MessageToJson(message)),
                "insertId": IID3,
                "timestamp": _datetime_to_rfc3339(TIMESTAMP3),
                "resource": _GLOBAL_RESOURCE._to_dict(),
                "trace": TRACE3,
                "spanId": SPANID3,
                "traceSampled": True,
            },
        ]
        client = _Client(project=self.PROJECT)
        api = client.logging_api = _DummyLoggingAPI()
        logger = _Logger()
        batch = self._make_one(logger, client=client)

        batch.log_text(
            TEXT,
            insert_id=IID1,
            timestamp=TIMESTAMP1,
            trace=TRACE1,
            span_id=SPANID1,
            trace_sampled=True,
        )
        batch.log_struct(
            STRUCT,
            insert_id=IID2,
            timestamp=TIMESTAMP2,
            trace=TRACE2,
            span_id=SPANID2,
            trace_sampled=False,
        )
        batch.log_proto(
            message,
            insert_id=IID3,
            timestamp=TIMESTAMP3,
            trace=TRACE3,
            span_id=SPANID3,
            trace_sampled=True,
        )
        batch.commit()

        self.assertEqual(list(batch.entries), [])
        self.assertEqual(api._write_entries_called_with,
                         (ENTRIES, logger.full_name, None, None))
Code Example #27
PB_FILE_PATH = 'employee.pb'
JSON_FILE_PATH = 'employee.json'

employeeResponse = employee_pb2.EmployeeResponse()
employeeDetails = employeeResponse.message
employeeDetails.id = 1
employeeDetails.email = '*****@*****.**'
employeeDetails.firstName = 'First1'
employeeDetails.lastName = 'Last1'

# Save in binary form
f = open(PB_FILE_PATH, "wb")
f.write(employeeResponse.SerializeToString())
f.close()

# Read employeeResponse back
employeeResponse = employee_pb2.EmployeeResponse()
f = open(PB_FILE_PATH, "rb")
employeeResponse.ParseFromString(f.read())
f.close()
from google.protobuf.json_format import MessageToJson  # the key import

json = MessageToJson(employeeResponse)
print(json)

# Save the JSON
f = open(JSON_FILE_PATH, "w")
f.write(json)
f.close()
Code Example #28
    def test_log_proto_w_explicit(self):
        import json
        import datetime
        from google.protobuf.json_format import MessageToJson
        from google.protobuf.struct_pb2 import Struct
        from google.protobuf.struct_pb2 import Value
        from google.cloud.logging.resource import Resource

        message = Struct(fields={"foo": Value(bool_value=True)})
        ALT_LOG_NAME = "projects/foo/logs/alt.log.name"
        DEFAULT_LABELS = {"foo": "spam"}
        LABELS = {"foo": "bar", "baz": "qux"}
        IID = "IID"
        SEVERITY = "CRITICAL"
        METHOD = "POST"
        URI = "https://api.example.com/endpoint"
        STATUS = "500"
        TRACE = "12345678-1234-5678-1234-567812345678"
        SPANID = "000000000000004a"
        REQUEST = {
            "requestMethod": METHOD,
            "requestUrl": URI,
            "status": STATUS
        }
        TIMESTAMP = datetime.datetime(2016, 12, 31, 0, 1, 2, 999999)
        RESOURCE = Resource(type="gae_app",
                            labels={
                                "module_id": "default",
                                "version_id": "test"
                            })
        ENTRIES = [{
            "logName": ALT_LOG_NAME,
            "protoPayload": json.loads(MessageToJson(message)),
            "labels": LABELS,
            "insertId": IID,
            "severity": SEVERITY,
            "httpRequest": REQUEST,
            "timestamp": "2016-12-31T00:01:02.999999Z",
            "resource": RESOURCE._to_dict(),
            "trace": TRACE,
            "spanId": SPANID,
            "traceSampled": True,
        }]
        client1 = _Client(self.PROJECT)
        client2 = _Client(self.PROJECT)
        api = client2.logging_api = _DummyLoggingAPI()
        logger = self._make_one(self.LOGGER_NAME,
                                client=client1,
                                labels=DEFAULT_LABELS)

        logger.log_proto(
            message,
            log_name=ALT_LOG_NAME,
            client=client2,
            labels=LABELS,
            insert_id=IID,
            severity=SEVERITY,
            http_request=REQUEST,
            timestamp=TIMESTAMP,
            resource=RESOURCE,
            trace=TRACE,
            span_id=SPANID,
            trace_sampled=True,
        )

        self.assertEqual(api._write_entries_called_with,
                         (ENTRIES, None, None, None))
Code Example #29
import employee2_pb2

PB_FILE_PATH = 'employee2.pb'
JSON_FILE_PATH = 'employee2.json'

employee = employee2_pb2.Employee()
employee.EmployeeId = 1001
employee.FirstName = 'Ishu'
employee.LastName = 'Pathipaka'
employee.Age = 27

# Save in binary form
f = open(PB_FILE_PATH, "wb")
f.write(employee.SerializeToString())
f.close()

# Read employee back
employee = employee2_pb2.Employee()
f = open(PB_FILE_PATH, "rb")
employee.ParseFromString(f.read())
f.close()
from google.protobuf.json_format import MessageToJson  # the key import

json = MessageToJson(employee)
print(json)

# Save the JSON
f = open(JSON_FILE_PATH, "w")
f.write(json)
f.close()
Code Example #30
File: test_request.py Project: willyspinner/jina
def test_init(req):
    assert Request(request=None)
    assert Request(request=req, copy=True)
    assert Request(request=req, copy=False)
    assert Request(request=MessageToDict(req))
    assert Request(request=MessageToJson(req))