Example #1
    def start_grpc(self):
        channel = implementations.insecure_channel(self.ip, self.port)
        stub = witness_pb2.beta_create_WitnessService_stub(channel)
        print("hello")
        while True:
            req = witness_pb2.WitnessBatchRequest()
            Merge(
                'Context{\nSessionId:"grpc_test_123"\n Type:1 \n Functions:%s \n}\n'
                % (self.function), req)
            for batch in range(0, self.batchsize):
                message = self.queue.get()
                image = 'Images {\n Data { \n URI:"%s" \n} \n}' % (message)
                Merge(image, req)
            resp = stub.BatchRecognize(req, self.timeout)
            print(resp)
            lock.acquire()
            global total
            total = total + self.batchsize
            log_file = open('./grpc.log', 'a')
            log_file.write(
                '%s total send pictures is %d, number of vehicle is %d , status: %s\n'
                % (time.strftime("%Y-%m-%d %X", time.localtime()), total,
                   len(resp.Results[0].Vehicles), resp.Context.Status))
            log_file.close()
            lock.release()
        self.queue.task_done()  # unreachable: the while loop above never exits
Example #2
def test_calibration_heatmap():
    calibration = cg.Calibration(_CALIBRATION_DATA)

    heatmap = calibration.heatmap('t1')
    figure = mpl.figure.Figure()
    axes = figure.add_subplot(111)
    heatmap.plot(axes)
    assert axes.get_title() == 'T1'

    heatmap = calibration.heatmap('two_qubit_xeb')
    figure = mpl.figure.Figure()
    axes = figure.add_subplot(999)
    heatmap.plot(axes)
    assert axes.get_title() == 'Two Qubit Xeb'

    with pytest.raises(ValueError, match="one or two qubits.*multi_qubit"):
        multi_qubit_data = Merge(
            """metrics: [{
                name: 'multi_qubit',
                targets: ['0_0', '0_1', '1_0'],
                values: [{double_val: 0.999}]}]""",
            v2.metrics_pb2.MetricsSnapshot(),
        )
        cg.Calibration(multi_qubit_data).heatmap('multi_qubit')
    with pytest.raises(ValueError, match="single metric values.*multi_value"):
        multi_qubit_data = Merge(
            """metrics: [{
                name: 'multi_value',
                targets: ['0_0'],
                values: [{double_val: 0.999}, {double_val: 0.001}]}]""",
            v2.metrics_pb2.MetricsSnapshot(),
        )
        cg.Calibration(multi_qubit_data).heatmap('multi_value')
Example #3
def test_proto(prototxt):
    import sys
    sys.path.append('../proto')
    from berry_pb2 import NetParameter
    with open(prototxt, 'r') as f:
        txt = f.read()
    net = Merge(txt, NetParameter())
    print(net.ListFields())
Example #4
def read_prototxt(ptxt_path, message):
    """Takes a path to a ``.prototxt`` file and a protobuf message and merges
    the two."""
    with open(ptxt_path) as fp:
        ptxt_contents = fp.read()

    return Merge(ptxt_contents, message)
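
The docstring above relies on Merge's defining property: the parsed text is
folded into whatever the message already contains. Below is a minimal sketch
of how Merge differs from the stricter text_format.Parse, using the
well-known Duration message so it runs as-is (this is standard
google.protobuf behavior, not something specific to the snippets on this
page):

from google.protobuf import duration_pb2
from google.protobuf import text_format

d = duration_pb2.Duration()
# Merge tolerates a singular field appearing more than once (the last
# value wins) and layers the parsed text on top of d's existing contents.
text_format.Merge('seconds: 1 seconds: 2', d)
assert d.seconds == 2

# Parse is stricter: the same input raises ParseError.
try:
    text_format.Parse('seconds: 1 seconds: 2', duration_pb2.Duration())
except text_format.ParseError:
    pass  # a singular field set twice is an error for Parse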
Example #5
def load_prototxt_def(protofile):
    import caffe.proto.caffe_pb2 as caffe_pb2
    from google.protobuf.text_format import Merge
    net = caffe_pb2.NetParameter()
    with open(protofile, 'r') as f:
        Merge(f.read(), net)
    return net
Example #6
def _get_config():
    """Gets config."""
    config_file = FLAGS.config
    config = ResnetModelConfig()
    assert config_file is not None, 'Must pass in a configuration file through --config'
    Merge(open(config_file).read(), config)
    return config
Example #7
def convert_prototxt_to_json(path_to_prototxt, path_to_json):
    net = znet_caffe_pb2.NetParameter()
    Merge(open(path_to_prototxt, 'r').read(), net)

    with open(path_to_json, 'w', encoding="utf8") as f:
        net_dict = protobuf_to_dict(net)
        json.dump(net_dict, f, indent=3)
Example #8
def test_get_calibration(get_calibration):
    qjob = qtypes.QuantumJob(execution_status=qtypes.ExecutionStatus(
        calibration_name='projects/a/processors/p/calibrations/123'))
    calibration = qtypes.QuantumCalibration(data=_to_any(
        Merge(
            """
    timestamp_ms: 123000,
    metrics: [{
        name: 'xeb',
        targets: ['0_0', '0_1'],
        values: [{
            double_val: .9999
        }]
    }, {
        name: 't1',
        targets: ['0_0'],
        values: [{
            double_val: 321
        }]
    }, {
        name: 'globalMetric',
        values: [{
            int32_val: 12300
        }]
    }]
""", v2.metrics_pb2.MetricsSnapshot())))
    get_calibration.return_value = calibration

    job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
    assert list(job.get_calibration()) == ['xeb', 't1', 'globalMetric']
    get_calibration.assert_called_once_with('a', 'p', 123)
Example #9
def get_solver(args):
    from caffe.proto.caffe_pb2 import SolverParameter
    from google.protobuf.text_format import Merge

    solver = SolverParameter()
    Merge(open(args.solver, 'r').read(), solver)
    return solver
Example #10
def main():
    args = parse_args()

    if not args.verbose:
        os.environ['GLOG_minloglevel'] = '2'

    import caffe
    from caffe.proto import caffe_pb2

    if args.cpu:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()

    if args.verbose:
        if args.cpu:
            print("CPU mode set.")
        else:
            print("GPU mode set.")

    forwardErrorSum = 0.0
    diffErrorSum = 0.0
    weightDiffErrorSum = 0.0

    for i in range(args.tests_number):
        tmpNetProto = tempfile.NamedTemporaryFile()
        tmpNetProto.write(createConvolutionsNet(args.params, args.update_weight_diff, args.scale_weight_diff))
        tmpNetProto.flush()
        net = caffe.Net(tmpNetProto.name, caffe.TEST)
        deploy = caffe_pb2.NetParameter()
        Merge(open(tmpNetProto.name, 'r').read(), deploy)
        tmpNetProto.close()
        sys.stdout.write("{}. ".format(i + 1))
        if not args.verbose:
            sys.stdout.write("Input shape: {},{},{} ".format(net.blobs['convolution_input'].data.shape[2],
                                                             net.blobs['convolution_input'].data.shape[3],
                                                             net.blobs['convolution_input'].data.shape[1]))
            convParams = deploy.layer[2].convolution_param
            sys.stdout.write("Conv params: {},{},{},{} ".format(convParams.num_output,
                                                                convParams.kernel_size[0],
                                                                convParams.stride[0],
                                                                convParams.pad[0]))
            sys.stdout.write("Output shape: {},{},{}\n".format(net.blobs['convolution'].data.shape[2],
                                                               net.blobs['convolution'].data.shape[3],
                                                               net.blobs['convolution'].data.shape[1]))
        forwardError, diffError, weightDiffError = testBinaryConvolutionLayer(net, deploy, args)
        forwardErrorSum += forwardError
        diffErrorSum += diffError
        weightDiffErrorSum += weightDiffError

    meanForwardError = forwardErrorSum / args.tests_number
    meanDiffError = diffErrorSum / args.tests_number
    meanWeightDiffError = weightDiffErrorSum / args.tests_number

    print ("\n#############################################################")
    print ("Number of tests: {}\n".format(args.tests_number))
    print ("Mean forward error: {}".format(meanForwardError))
    print ("Mean diff error: {}".format(meanDiffError))
    print ("Mean weight diff error: {}".format(meanWeightDiffError))
    print ("#############################################################")
Example #11
def readCaffeModelFile(txt_path='', binary_path=''):
    net = caffe.Net(txt_path, binary_path, caffe.TEST)
    result = []
    net_proto = caffe_proto.NetParameter()
    with open(txt_path, 'r') as f:
        Merge(f.read(), net_proto)

    dimension = net.blobs['data'].data.shape[1:]
    proto_layer_counter = 0
    scale = 1.0
    for i in range(len(net.layers)):
        l = net.layers[i]
        type = l.type.upper()
        if type not in layer_not_in_prototxt:
            proto_layer_counter += 1
        lpt = net_proto.layer[proto_layer_counter]
        if layer_skipped[type]:
            if type == DATA:
                scale = lpt.transform_param.scale
            continue
        else:
            l.ltype = type
            if type == POOLING:
                p = lpt.pooling_param
                fixPooling(l, p)
            elif type == SOFTMAX_WITHLOSS or type == SOFTMAX:
                l.ltype = SOFTMAX
            elif type == CONVOLUTION:
                # repeated scalar fields support len() and indexing directly,
                # so the private ._values list is not needed
                stride = lpt.convolution_param.stride
                l.stride = stride[0] if len(stride) else 0
                pad = lpt.convolution_param.pad
                l.pad = pad[0] if len(pad) else 0

            # l.shape is the layer's input dimension, which comes from the
            # previous layer's output blob
            l.shape = dimension
            if lpt.name in net.blobs:
                if len(net.blobs[lpt.name].data.shape) != 0:
                    dimension = net.blobs[lpt.name].data.shape[1:]

            result.append([l, lpt])

    # Use the name in net_proto.layer to query the input dimension; if the
    # last input dimension is not null, keep it, so even when a layer such
    # as ReLU cannot be looked up it still gets the correct result.
    result.append(
        [edict({
            'ltype': RETURN_CALLBACK,
            'shape': dimension
        }), None])
    dimension = net.blobs['data'].data.shape[1:]

    return result, dimension, scale
Example #12
def read_cartridge_template(path):
    cartridge = None

    with open(path, "r") as f:
        cartridge = cartridge_pb2.Cartridge()
        Merge(f.read(), cartridge)

    return cartridge
Example #13
File: util.py  Project: LordHui/ZendarSDK
def read_proto_text(path, model):
    """
    read protobuf in text form
    """
    with open(path, 'r') as fp:
        Merge(fp.read(), model)

    return model
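
A hypothetical call site for read_proto_text: Duration stands in for any
generated message class, and 'radar.pbtxt' is a placeholder path, not a file
from the ZendarSDK project.

from google.protobuf import duration_pb2

# Any generated message instance works as `model`; the function fills it
# in place and also returns it.
duration = read_proto_text('radar.pbtxt', duration_pb2.Duration())
print(duration.seconds)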
Example #14
File: mmdnn.py  Project: honkliu/March
def get_ops():
    sdk_path = os.path.dirname(__file__)
    ops_file = os.path.join(sdk_path, 'mmdnn_ops.pbtxt')

    ops = op_def_pb2.OpList()
    with open(ops_file) as fn:
        Merge(fn.read(), ops)
    ops_list = [MessageToDict(op) for op in ops.op]
    return ops_list
Example #15
def read_proto_from_file(proto, filename, binary=True):
    if binary:
        f = open(filename, "rb")
        proto.ParseFromString(f.read())
        f.close()
    else:
        f = open(filename, "r")
        Merge(f.read(), proto)
        f.close()
Example #16
def transfer2depthwise(proto_src, proto_dst):
    net = caffe_pb2.NetParameter()
    Merge(open(proto_src, 'r').read(), net)
    for layer in net.layer:
        if layer.type == "Convolution":
            if layer.convolution_param.group != 1:
                layer.type = "DepthwiseConvolution"
    with open(proto_dst, 'w') as tf:
        tf.write(str(net))
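
transfer2depthwise writes str(net) back to disk, which works because
str(message) is the same text format that Merge reads. A minimal round-trip
sketch of that property, using the well-known Duration message:

from google.protobuf import duration_pb2
from google.protobuf import text_format

original = duration_pb2.Duration(seconds=42)
copy = duration_pb2.Duration()
# str(message) is equivalent to text_format.MessageToString(message),
# so text written with str(net) can be read back with Merge.
text_format.Merge(str(original), copy)
assert copy == original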
Example #17
    def test_pack_unpack(self):
        expected_cartridge = Cartridge()
        Merge(CARTRIDGE_TEXT, expected_cartridge)

        crypto = Desx_Crypto()
        checksum = Crc16_Checksum()
        manager = Manager(crypto, checksum)
        unpacked_cartridge = manager.unpack(manager.pack(expected_cartridge))

        assert expected_cartridge == unpacked_cartridge
Example #18
def _get_config():
    """Gets config."""
    if FLAGS.config is None:
        config_file = os.path.join(
            os.path.realpath(cnn.__path__[0]), 'configs/cifar-{}.prototxt'.format(FLAGS.model))
    else:
        config_file = FLAGS.config
    config = ResnetModelConfig()
    Merge(open(config_file).read(), config)
    return config
Example #19
    def start(self):
        files = glob.glob(
            os.path.join(os.path.dirname(os.path.abspath(__file__)),
                         'examples', '*.proto'))
        for filepath in sorted(files):
            with open(filepath) as f:
                feed = gtfs_realtime_pb2.FeedMessage()
                Merge(f.read(), feed)
                self.feed_processor.process_feed(
                    self.feed_filter.filterTume(feed))
Example #20
def _generate_cell_chunks(chunk_text_pbs):
    from google.protobuf.text_format import Merge
    from google.cloud.bigtable_v2.proto.bigtable_pb2 import ReadRowsResponse

    chunks = []

    for chunk_text_pb in chunk_text_pbs:
        chunk = ReadRowsResponse.CellChunk()
        chunks.append(Merge(chunk_text_pb, chunk))

    return chunks
Example #21
    def __init__(self, dataname, txt_path, model_path, config):
        self.net = caffe.Net(txt_path, model_path, caffe.TEST)
        self.net_proto = caffe_proto.NetParameter()
        self.config = {token[0]: token[1] for token in config}
        self.dataname = dataname
        with open(txt_path, 'r') as f:
            Merge(f.read(), self.net_proto)
        self.layers = []
        self.prototxt_dict = {}
        for i in range(len(self.net_proto.layer)):
            self.prototxt_dict[
                self.net_proto.layer[i].name] = self.net_proto.layer[i]

        prev_layer = None
        for i in range(len(self.net.layers)):
            layer = self.net.layers[i]
            name = self.net._layer_names[i]
            model_param, proto_param = None, None
            try:
                model_param = self.net.blobs[self.fitDataLayer(name,
                                                               lowercase=True)]
                proto_param = self.prototxt_dict[name]
            except KeyError:
                pass
            l = construct(
                self.fitDataLayer(layer.type), {
                    'blob': layer.blobs,
                    'model_param': model_param,
                    'proto_param': proto_param,
                    'prev': prev_layer
                })
            if l:
                if l.param['pad'] > 0:
                    lpadding = construct(
                        PaddingType, {
                            'blob': None,
                            'proto_param': {
                                'pad': l.param['pad']
                            },
                            'model_param': None,
                            'prev': prev_layer
                        })
                    self.layers.append(lpadding)
                prev_layer = l
                self.layers.append(l)

        self.layers.append(
            construct(
                OutputType, {
                    'blob': None,
                    'model_param': None,
                    'proto_param': None,
                    'prev': prev_layer
                }))
Example #22
def parse_net_proto(prototxt):
    from caffe.proto import caffe_pb2
    from google.protobuf.text_format import Merge
    from collections import OrderedDict
    net_param = caffe_pb2.NetParameter()
    with open(prototxt, 'r') as f:
        Merge(f.read(), net_param)
    layer_names = []
    all_layer_info = OrderedDict()
    negative_slope = None
    is_bn = 0
    for layer in net_param.layer:
        layer_name = layer.name
        one_layer_info = {}
        # one_layer_info['ps'] = layer.
        if layer.type == 'Pooling':
            param = layer.pooling_param
            # pooling_param  = caffe_pb2.PoolingParameter()
            one_layer_info['pooling'] = 'max' if param.pool == 0 else 'ave'

            # default value for Pooling layer
            # note that Pooling layer don't support ps operation
            one_layer_info['ps'] = False
            one_layer_info['bn'] = False
            one_layer_info['dilation'] = 1
        elif layer.type == 'Convolution':
            param = layer.convolution_param
            one_layer_info['ps'] = help_parse(param.position_sensetive, 'ps')
            one_layer_info['dilation'] = help_parse(param.dilation, 'dilation')
            one_layer_info['bn'] = False
        elif layer.type == 'ReLU':
            negative_slope = help_parse(
                layer.relu_param.negative_slope, 'negative_slope')
            continue
        elif layer.type == 'BatchNorm' or layer.type == 'Scale':
            is_bn += 1
            continue
        else:
            print('skip layer {:s}'.format(layer.name))
            continue
        if negative_slope is not None:
            one_layer_info['negative_slope'] = negative_slope
            negative_slope = None
        if is_bn == 2:
            all_layer_info[layer_names[-1]]['bn'] = True
            is_bn = 0
        one_layer_info['kernel_size'] = help_parse(param.kernel_size, None)
        one_layer_info['pad'] = help_parse(param.pad, 'pad')
        one_layer_info['stride'] = help_parse(param.stride, 'stride')
        layer_names.append(layer_name)
        all_layer_info[layer_name] = one_layer_info
    all_layer_info[layer_names[-1]]['bn'] = False
    return all_layer_info
Example #23
File: convert.py  Project: yiiwood/apollo
def prototxt_to_forward(prototxt):
    net = caffe_pb2.NetParameter()
    with open(prototxt, 'r') as f:
        Merge(f.read(), net)
    if net.layer:
        net_layers = net.layer
    else:
        net_layers = net.layers
    layers = []
    for layer in net_layers:
        layers.append(parse_layer(layer))
    return layers
Example #24
def load_test_message_stream(path):
    """
    Loads a message stream. The message stream is a simple text file of
    protobuf messages sent and received during the course of a training or
    prediction run.

    The file is formatted very simply, as pairs of lines for each message:
    Line N  : SEND|RECV
    Line N+1: ServerToSimulator or SimulatorToServer protobuf object serialized
              to text form with google.protobuf.text_format.MessageToString(),
              or None indicating an empty message.

    This output file can be easily re-created with any of the gym sample
    simulators by simply adding a "--messages-out <PATH>" parameter to the
    command line.

    :param path: Path to test file to load.
    :type path: string
    :return: Array of TestMessage instances representing the back-and-forth
             communications between the simulator and its BRAIN.
    """
    with open(path, 'r') as infile:

        line_number = 0
        direction = None
        message_as_text = None
        message = None
        messages = []
        for line in infile:
            line_number += 1
            line = line.strip()
            if line_number % 2 == 1:
                direction = line
            else:
                message_as_text = line
                if message_as_text == 'None':
                    message = None
                elif direction == 'RECV':
                    message = ServerToSimulator()
                elif direction == 'SEND':
                    message = SimulatorToServer()
                else:
                    raise RuntimeError('Error loading file '
                                       'on line {}'.format(line_number))
                if message:
                    Merge(message_as_text, message)
                tst_msg = TestMessage(direction=direction,
                                      message_as_text=message_as_text,
                                      message=message)
                messages.append(tst_msg)

    return messages
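
A hypothetical usage sketch: 'messages.log' is a stand-in for a file produced
with the --messages-out flag mentioned in the docstring, and the attributes
are the TestMessage fields used above.

for msg in load_test_message_stream('messages.log'):
    kind = type(msg.message).__name__ if msg.message else 'None'
    print(msg.direction, kind)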
Example #25
    def export(self):
        pipeline_config = TrainEvalPipelineConfig()
        Merge(self.config_path.read_text(), pipeline_config)
        last_ckpt = max(self.training_path.glob("model.ckpt-*.meta"),
                        key=_get_ckpt_number_from_file).with_suffix("")
        n_steps = last_ckpt.suffix.split("-")[-1]
        export_inference_graph(
            input_type="image_tensor",
            pipeline_config=pipeline_config,
            trained_checkpoint_prefix=str(last_ckpt),
            output_directory=str(PIPELINES_DIR / self.task /
                                 f"{self.name}__{n_steps}_steps"),
        )
Example #26
    @classmethod
    def create_from_file(cls,
                         config_filename,
                         is_training=True,
                         inp=None,
                         label=None,
                         batch_size=None):
        config = ResnetModelConfig()
        Merge(open(config_filename).read(), config)
        return cls(config,
                   is_training=is_training,
                   inp=inp,
                   label=label,
                   batch_size=batch_size)
Example #27
    def test_pack(self):
        cartridge = Cartridge()
        Merge(CARTRIDGE_TEXT, cartridge)

        expected_packed_eeprom = bytearray(
            binascii.unhexlify(PACKED_CARTRIDGE_HEX))

        crypto = Desx_Crypto()
        checksum = Crc16_Checksum()
        manager = Manager(crypto, checksum)
        packed_eeprom = manager.pack(cartridge)

        assert expected_packed_eeprom == packed_eeprom
Example #28
def test_run_delegation(create_job, get_results):
    dt = datetime.datetime.now(tz=datetime.timezone.utc)
    create_job.return_value = (
        'steve',
        quantum.QuantumJob(
            name='projects/a/programs/b/jobs/steve',
            execution_status=quantum.ExecutionStatus(state=quantum.ExecutionStatus.State.SUCCESS),
            update_time=dt,
        ),
    )
    get_results.return_value = quantum.QuantumResult(
        result=util.pack_any(
            Merge(
                """sweep_results: [{
        repetitions: 4,
        parameterized_results: [{
            params: {
                assignments: {
                    key: 'a'
                    value: 1
                }
            },
            measurement_results: {
                key: 'q'
                qubit_measurement_results: [{
                  qubit: {
                    id: '1_1'
                  }
                  results: '\006'
                }]
            }
        }]
    }]
""",
                v2.result_pb2.Result(),
            )
        )
    )

    program = cg.EngineProgram('a', 'b', EngineContext())
    param_resolver = cirq.ParamResolver({})
    results = program.run(
        job_id='steve', repetitions=10, param_resolver=param_resolver, processor_ids=['mine']
    )

    assert results == cg.EngineResult(
        params=cirq.ParamResolver({'a': 1.0}),
        measurements={'q': np.array([[False], [True], [True], [False]], dtype=bool)},
        job_id='steve',
        job_finished_time=dt,
    )
Example #29
def create_header(prototxtFile):
    # Create the Caffe network parameter object
    net = caffe_pb2.NetParameter()

    # merge the given network with the object. This will read the text file and initialize all
    # parameters of the object with those from the text file.
    Merge(open(prototxtFile, 'r').read(), net)

    hfile = open(headerFileName, 'w')
    sfile = open(sourceFileName, 'w')
    hfile.write(startString)
    hfile.write(docString)
    sfile.write(docString)
    sfile.write("#include " + '"' + headerFileName + '"' + "\n\n")

    # find the total number of layers in the network. Input layer is not counted as it is a data layer.
    # hence -1
    NumCnnLayers = len(net.layer)
    hfile.write("#define NO_DEEP_LAYERS " + str(NumCnnLayers - 1) + "\n")

    # create header file writer object
    hw = DnnHeaderCreater(net, hfile, sfile)

    hw.parse_params()

    # make sure the structures are arranged in the layer connection format
    hw.order_structures()
    hfile.write("#define INPUT_IMG_WIDTH " + str(hw.inputWidth) + "\n")
    hfile.write("#define INPUT_IMG_HEIGHT " + str(hw.inputHeight) + "\n")
    hfile.write("#define NO_INPUT_MAPS " + str(hw.noInputMaps) + "\n")

    # write general struct definition to the header file
    hw.write_struct_definition(structName)

    # declare the array of structure
    hfile.write("extern " + "const " + structName + " " + arrayName +
                "[NO_DEEP_LAYERS];\n\n")
    hw.write_struct_array(structName)

    # write the header end string
    hfile.write(endString)
    hfile.write('\n')
    # print approx no of operations present in the network
    #hw.compute_no_ops()

    hfile.close()
    sfile.close()
    cprint('Generated source files successfully', 'green')
    cprint(
        'Copy {:s} and {:s} to the main project src and inc directories respectively.'
        .format(sourceFileName, headerFileName), 'green')
Example #30
def test_get_engine_device(get_processor):
    device_spec = _to_any(
        Merge(
            """
valid_gate_sets: [{
    name: 'test_set',
    valid_gates: [{
        id: 'x',
        number_of_qubits: 1,
        gate_duration_picos: 1000,
        valid_targets: ['1q_targets']
    }]
}],
valid_qubits: ['0_0', '1_1'],
valid_targets: [{
    name: '1q_targets',
    target_ordering: SYMMETRIC,
    targets: [{
        ids: ['0_0']
    }]
}]
""",
            v2.device_pb2.DeviceSpecification(),
        ))

    gate_set = cg.SerializableGateSet(
        gate_set_name='x_gate_set',
        serializers=[
            cg.GateOpSerializer(gate_type=cirq.XPowGate,
                                serialized_gate_id='x',
                                args=[])
        ],
        deserializers=[
            cg.GateOpDeserializer(serialized_gate_id='x',
                                  gate_constructor=cirq.XPowGate,
                                  args=[])
        ],
    )

    get_processor.return_value = qtypes.QuantumProcessor(
        device_spec=device_spec)
    device = cirq_google.get_engine_device('rainbow',
                                           'project',
                                           gatesets=[gate_set])
    assert set(device.qubits) == {cirq.GridQubit(0, 0), cirq.GridQubit(1, 1)}
    device.validate_operation(cirq.X(cirq.GridQubit(0, 0)))
    with pytest.raises(ValueError):
        device.validate_operation(cirq.X(cirq.GridQubit(1, 2)))
    with pytest.raises(ValueError):
        device.validate_operation(cirq.Y(cirq.GridQubit(0, 0)))