示例#1
0
    def _handle_registration_acknowledgement(self, message):
        """
        Validate the server's registration acknowledgement and build the
        message that should be sent back.

        :param message: An acknowledge register message.
        :type message: ServerToSimulator protobuf class
        :return: A ready message
        :rtype: SimulatorToServer protobuf message
        :raises RuntimeError: on a wrong message type or missing payload.
        """
        if message.message_type != ServerToSimulator.ACKNOWLEDGE_REGISTER:
            raise RuntimeError(
                'Expected ACKNOWLEDGE_REGISTER but got {}'.format(
                    MessageToString(message)))
        if not message.acknowledge_register_data:
            raise RuntimeError(
                'Missing data in ACKNOWLEDGE_REGISTER message {}'.format(
                    MessageToString(message)))
        self._state = DriverState.ACTIVE
        self._base_protocol.handle_register_acknowledgement(
            message.acknowledge_register_data)

        # Difference between training and predicting is here... instead of
        # sending a READY, send an initial STATE.
        reply = SimulatorToServer()
        self._simulator_protocol.generate_state_message(reply)
        return reply
示例#2
0
 def command_loop():
     """Periodically log motor state and command zero velocity to all motors."""
     periodic.read()
     # Log roughly every two seconds worth of command ticks.
     if count[0] % (2 * command_rate_hz) == 0:
         logger.info(
             'right: %s\nleft: %s',
             MessageToString(right_motor.get_state(), as_one_line=True),
             MessageToString(left_motor.get_state(), as_one_line=True),
         )
     for motor in (right_motor, right_motor_aft, left_motor, left_motor_aft):
         motor.send_velocity_command(0.0)
     count[0] += 1
示例#3
0
def ReLU_OPT_Create_Prototxt(original_prototxt_path, original_model_path,
                             optimized_prototxt_path):
    """Fuse standalone ReLU layers into their preceding layer in a prototxt.

    A ReLU that directly follows a Convolution / ConvolutionDepthwise /
    DepthwiseConvolution / InnerProduct / Eltwise layer is dropped and the
    already-copied predecessor's type is rewritten to the fused variant
    (ConvolutionReLU / EltwiseReLU / InnerProductReLU).  Every other layer
    is copied verbatim into the optimized prototxt.
    """
    net_param = caffe_pb2.NetParameter()
    new_net_param = caffe_pb2.NetParameter()
    with open(original_prototxt_path, 'rt') as f:
        Parse(f.read(), net_param)
    for layer_idx in range(0, len(net_param.layer)):
        layer = net_param.layer[layer_idx]
        # NOTE(review): for layer_idx == 0 this wraps around to the LAST
        # layer of the net; assumes the first layer is never a ReLU — confirm.
        pre_layer = net_param.layer[layer_idx - 1]
        if layer.type == 'ReLU' and \
            (pre_layer.type == 'Convolution' or pre_layer.type == 'ConvolutionDepthwise' or pre_layer.type == 'DepthwiseConvolution' or \
                pre_layer.type == 'InnerProduct' or pre_layer.type == 'Eltwise'):
            # Rewrite the most recently copied layer to the fused type.
            if pre_layer.type == 'Convolution' or pre_layer.type == 'ConvolutionDepthwise' or pre_layer.type == 'DepthwiseConvolution':
                new_net_param.layer[-1].type = 'ConvolutionReLU'
            elif pre_layer.type == 'Eltwise':
                new_net_param.layer[-1].type = 'EltwiseReLU'
            else:
                new_net_param.layer[-1].type = 'InnerProductReLU'

            # An in-place ReLU (top == bottom) needs no rewiring; otherwise
            # the fused layer must expose the ReLU's top blob name.
            if layer.top[0] == layer.bottom[0]:
                continue
            else:
                new_net_param.layer[-1].top[0] = layer.top[0]
        else:
            new_net_param.layer.extend([layer])
    new_net_param.name = net_param.name
    with open(optimized_prototxt_path, 'wt') as f:
        f.write(MessageToString(new_net_param))
    print "ReLU OPT : Create Optimized Prototxt Done."
    print bcolors.OKGREEN + "ReLU OPT : Model at " + original_model_path + "." + bcolors.ENDC
    print bcolors.OKGREEN + "ReLU OPT : Prototxt at " + optimized_prototxt_path + "." + bcolors.ENDC
示例#4
0
 def _record(self, send_or_recv, message):
     self.recording_queue.put(send_or_recv)
     if message:
         body = MessageToString(message, as_one_line=True)
     else:
         body = 'None'
     self.recording_queue.put(body)
示例#5
0
def _print_single_message(data, unknown_fields):
    """Parse a raw MRP payload and print its protobuf text representation."""
    from pyatv.mrp.protobuf import ProtocolMessage

    message = ProtocolMessage()
    message.ParseFromString(data)
    print(MessageToString(message, print_unknown_fields=unknown_fields))
示例#6
0
 def test_grpc_call(self):
     """Exercise the language-detect gRPC service with several sentence sets."""
     with grpc.insecure_channel('localhost:' + self.port) as channel:
         stub = LanguageDetectStub(channel)
         # First request: dump the full response and the detected sentence.
         request = Input()
         request.input = self.sentences
         response = stub.infer(request)
         print(MessageToString(response))
         print(response.language[0].sentence.encode('utf-8'))
         # Remaining requests only print the raw response object.
         for sentences in (self.sentences_1, self.sentences_2,
                           self.sentences_3, self.sentences_4):
             request = Input()
             request.input = sentences
             response = stub.infer(request)
             print(response)
def AFFine_OPT_Create_Prototxt(original_prototxt_path, optimized_prototxt_path):
    """Drop BatchNorm/Scale layers that can be folded into CONV/IP layers.

    Single-bottom Scale and BatchNorm layers are removed from the prototxt
    (their parameters are expected to be merged into the preceding
    Convolution/InnerProduct weights elsewhere).  A Scale layer with two
    bottoms cannot be folded and is kept as-is.
    """
    net_param = caffe_pb2.NetParameter()
    new_net_param = caffe_pb2.NetParameter()
    with open(original_prototxt_path, 'rt') as f:
        Parse(f.read(), net_param)
    layer_num = len(net_param.layer)

    parameter_layer_type = ['Convolution', 'InnerProduct']
    merge_layer_type = ['Scale', 'BatchNorm']

    for layer_idx in range(0, layer_num):
        layer = net_param.layer[layer_idx]
        if layer.type not in merge_layer_type:
            new_net_param.layer.extend([layer])
        else:
            if layer.type == 'Scale' and len(layer.bottom) != 1:
                # In case, scale layer has two bottom blob, then scale layer can't be merged into CONV/IP.
                new_net_param.layer.extend([layer])
            else:
                continue
        if layer.type in parameter_layer_type:
            if layer_idx+1 < layer_num:
                if net_param.layer[layer_idx+1].type in merge_layer_type and len(net_param.layer[layer_idx+1].bottom)==1:
                    # In case, scale layer has two bottom blob, then scale layer can't be merged into CONV/IP.
                    # Folding BN/Scale into the layer introduces a bias term,
                    # so force it on in the copied layer.
                    if layer.type == 'Convolution':
                        new_net_param.layer[-1].convolution_param.bias_term = True
                    else:
                        new_net_param.layer[-1].inner_product_param.bias_term = True
    new_net_param.name = net_param.name
    with open(optimized_prototxt_path, 'wt') as f:
        f.write(MessageToString(new_net_param))
    print "BN SCALE OPT : Create Optimized Prototxt Done."
    print bcolors.OKGREEN + "BN SCALE OPT : Prototxt at " + optimized_prototxt_path + "." + bcolors.ENDC
示例#8
0
文件: resconv.py 项目: jj4jj/resconv
def pbin_dump(path):
    """Print the protobuf text form of a serialized table file.

    The table type is derived from the file's basename (the text before
    the first '.') and looked up as an attribute of the ``res`` module.

    :param path: path to a .pbin file containing a serialized message.
    """
    name = os.path.basename(path)
    table_type_name = name.split('.')[0]
    tb = getattr(res, table_type_name)()
    # Open in binary mode: ParseFromString expects raw bytes, and text mode
    # would corrupt the payload via newline translation on some platforms.
    # The context manager also guarantees the handle is closed (the original
    # leaked an open file object).
    with open(path, "rb") as f:
        tb.ParseFromString(f.read())
    print(MessageToString(tb))
示例#9
0
def _replace_graph_node_names(graph, mapping):
    """Return a copy of *graph* with node names replaced per *mapping*.

    The graph is serialized to protobuf text format, every occurrence of a
    mapped node name (located via _node_name_regex_tpl) is substituted, and
    the rewritten text is parsed back into a fresh GraphDef.

    NOTE(review): mapping keys are interpolated directly into the regex
    alternation — assumes they contain no regex metacharacters; confirm.
    """
    # regex, match all mapped name
    all_nodes_regex = re.compile(
        _node_name_regex_tpl.format('|'.join(mapping.keys())))

    # old graph text
    graph_text = MessageToString(graph)

    # replace all node name
    obfuscated_graph_text = io.StringIO()
    last_match_end = 0
    while True:
        match = all_nodes_regex.search(graph_text, last_match_end)
        if match is None:
            break

        # prefix: copy everything between the previous match and this one,
        # bounded by the 'name' capture group.
        match_beg, match_end = match.span('name')
        obfuscated_graph_text.write(graph_text[last_match_end:match_beg])
        last_match_end = match_end

        # node name: write the replacement (or the original if unmapped)
        node_name = graph_text[match_beg:match_end]
        obfuscated_graph_text.write(mapping.get(node_name, node_name))

    # trailing text after the final match
    obfuscated_graph_text.write(graph_text[last_match_end:])

    obfuscated_graph = GraphDef()
    Parse(obfuscated_graph_text.getvalue(), obfuscated_graph)
    obfuscated_graph_text.close()
    return obfuscated_graph
示例#10
0
    def generate_state_message(self, message):
        """Populate *message* with the simulator's current state.

        Sets message_type/sim_id, appends one state_data entry holding the
        serialized state, the reward and the terminal flag, and attaches
        the last action taken when one exists.

        :param message: SimulatorToServer protobuf message to fill in.
        """

        message.message_type = SimulatorToServer.STATE
        message.sim_id = self._simulator_id
        state = self._simulator.get_state()

        # Reward comes from a named method on the simulator when one is
        # configured; otherwise it defaults to zero.
        if self._current_reward_name:
            reward = getattr(self._simulator, self._current_reward_name)()
        else:
            reward = 0.0

        log.debug('generate_state_message => state = %s', pformat(state))
        terminal = state.is_terminal
        state_message = self._output_schema()
        convert_state_to_proto(state_message, state.state)

        current_state_data = message.state_data.add()
        current_state_data.state = state_message.SerializeToString()
        current_state_data.reward = reward
        current_state_data.terminal = terminal

        # add action taken
        last_action = self._simulator.get_last_action()
        if last_action is not None:
            actions_msg = self._prediction_schema()
            convert_state_to_proto(actions_msg, last_action)
            current_state_data.action_taken = actions_msg.SerializeToString()
        if self._log_state_messages:
            log.debug('Generated simulator state %s', MessageToString(message))
示例#11
0
 def __init__(self, expected, message):
     """Build the error text from the expected type and the received message."""
     description = 'Expected {} but got {}'.format(
         expected, MessageToString(message, as_one_line=True))
     super(UnexpectedMessageError, self).__init__(description)
 def _record(self, send_or_recv, message):
     yield self.recording_queue.put(send_or_recv)
     if message:
         yield self.recording_queue.put(
             MessageToString(message, as_one_line=True))
     else:
         yield self.recording_queue.put('None')
示例#13
0
def Inpt_OPT_New_Weight(original_prototxt_path, original_model_path,
                        optimized_prototxt_path, new_model_path, scale):
    """Fold the data-layer preprocessing scale into the first layer's weights.

    Finds the first non-input layer (which must be Convolution or
    InnerProduct), multiplies its weight blob by *scale*, saves the new
    caffemodel, and writes a prototxt whose data-layer scale is reset to 1.0.
    """
    net_param = caffe_pb2.NetParameter()
    with open(original_prototxt_path, 'rt') as f:
        Parse(f.read(), net_param)
    layer_num = len(net_param.layer)
    input_layer_type = ['Data', 'Input', 'AnnotatedData']
    for layer_idx in range(0, layer_num):
        layer = net_param.layer[layer_idx]
        if layer.type not in input_layer_type:
            assert (layer.type == 'Convolution' or layer.type == 'InnerProduct'
                    ), "## ERROR : First Layer MUST BE CONV or IP. ##"
            target_layer_name = layer.name
            break
        else:
            # NOTE(review): bare except swallows every error here, not just a
            # missing transform_param — consider narrowing the exception.
            try:
                net_param.layer[layer_idx].transform_param.scale = 1.0
            except:
                print bcolors.WARNING + "INPUT PREPROCESS (SCALE) OPT : ** WARNING ** NO SCALE found in DATA layer." + bcolors.ENDC

    new_net = caffe.Net(original_prototxt_path, str(original_model_path),
                        caffe.TEST)
    # Pre-scale the first layer's weights so removing the input scale is a no-op.
    new_net.params[target_layer_name][0].data[
        ...] = new_net.params[target_layer_name][0].data[...] * scale
    new_net.save(new_model_path)

    with open(optimized_prototxt_path, 'wt') as f:
        f.write(MessageToString(net_param))

    print "INPUT PREPROCESS (SCALE) OPT : Merge Input Scale Done."
    print bcolors.OKGREEN + "INPUT PREPROCESS (SCALE) OPT : Model at " + new_model_path + "." + bcolors.ENDC
    print bcolors.OKGREEN + "INPUT PREPROCESS (SCALE) OPT : Prototxt at " + optimized_prototxt_path + "." + bcolors.ENDC
示例#14
0
 def work(self, thread_queue, handle):
     """Worker loop: pull messages from *thread_queue* and pass them to *handle*.

     Runs until self._should_stop is set.  Exceptions raised by *handle*
     are logged — decoding the raw message via self._message_type when one
     is configured — and the loop continues.
     """
     logger = get_logger("Worker")
     while True:
         if self._should_stop:
             logger.info("receive stop signal, stopping worker...")
             break
         try:
             ch, method, props, message = thread_queue.get_nowait()
         except QueueEmpty:
             # Nothing queued; back off briefly before polling again.
             logger.debug("worker got no work to do")
             time.sleep(0.5)
             continue
         try:
             handle(message)
         except Exception as e:
             # If handling the message fails, it will not be ACKed.
             if self._message_type is not None:
                 parsed_message = self._message_type()
                 parsed_message.ParseFromString(message)
                 logger.exception(
                     "handle message %s error %s",
                     MessageToString(parsed_message, as_one_line=True),
                     e,
                 )
             else:
                 logger.exception("handle message %s error %s", message, e)
示例#15
0
文件: ipc.py 项目: isherman/core
    def _announce_recv(self):
        """Receive one multicast Announce datagram and record the service.

        Self-announcements, announcements from non-local hosts, and
        announcements whose host/port do not match the sending address are
        ignored; accepted announcements are timestamped, stored by
        'host:port', and fanned out to all announce subscribers.
        """
        data, address = self._mc_recv_sock.recvfrom(_g_datagram_size)

        # Ignore self-announcements
        if address[1] == self._mc_send_sock.getsockname()[1]:
            return

        # Ignore non-local announcements
        if not host_is_local(address[0], address[1]):
            logger.warning('ignoring non-local announcement: %s:%s',
                           address[0], address[1])
            return

        announce = Announce()
        announce.ParseFromString(data)

        # Ignore faulty announcements
        if announce.host != '127.0.0.1' or announce.port != address[1]:
            logger.warning(
                'announcement does not match sender... rejecting %s',
                MessageToString(announce, as_one_line=True))
            return

        # Store the announcement
        announce.recv_stamp.GetCurrentTime()
        self._services['%s:%d' % (announce.host, announce.port)] = announce
        for q in self._announce_subscribers:
            q.put_nowait(announce)
示例#16
0
 def export_sparse_graph_v4(self, output_v4_dir, emb_dim, space_list, order=1, comm_hint_type='common'):
     """Write the v4 sparse-graph conf files (embed dims and f2id mapping).

     Builds an EmbedDimList with a 'comm' and an 'ncomm' dense-input block
     (*order* controls which comes first), adds one embed-dim entry per
     sparse feature group, writes it to 'embed-dim-xdl-code-conf', then
     writes the feature-group -> fid mapping derived from *space_list* to
     'embed.best.meta'.  Both files are copied to *output_v4_dir* via
     output().

     NOTE(review): files are opened 'wb' but MessageToString returns text —
     this only works on Python 2 (the file also uses long/xrange); confirm
     before porting.
     """
     from xdl.python.training.v4 import dense_input_conf_pb2, embed_dimension_xdl_code_pb2, f2id_pb2
     from google.protobuf.text_format import MessageToString
     embed_dim_list = embed_dimension_xdl_code_pb2.EmbedDimList()
     # Microsecond UTC timestamp used as the model signature.
     embed_dim_list.model_signature = str(long(time.mktime(datetime.datetime.utcnow().timetuple()) * 1000000))
     if order == 1:
         comm_block = embed_dim_list.dense_input.input_blocks.add()
         ncomm_block = embed_dim_list.dense_input.input_blocks.add()
     else:
         ncomm_block = embed_dim_list.dense_input.input_blocks.add()
         comm_block = embed_dim_list.dense_input.input_blocks.add()
     comm_block.name = 'comm'
     ncomm_block.name = 'ncomm'
     comm_block.hint_type = dense_input_conf_pb2.UNKNOWN if comm_hint_type == 'unknown' else dense_input_conf_pb2.COMMON
     ncomm_block.hint_type = dense_input_conf_pb2.UNCOMMON
     comm_index = 0
     ncomm_index = 0
     for i in xrange(len(graph_tag().sparse_list)):
         name = graph_tag().sparse_list[i]
         # table == 0 means the feature group is "ncommon"; otherwise "common".
         table = graph_tag().fea_dict[name]['table']
         embed_dim = embed_dim_list.embed_dim_list.add()
         embed_dim.embed_dim = emb_dim
         embed_dim.fea_groupid = name
         embed_dim.fea_group_global_offset = i + 1
         embed_dim.fea_type = "ncommon" if table == 0 else "common"
         embed_field = ncomm_block.embed_fields.add() if table == 0 else comm_block.embed_fields.add()
         if table == 0:
             embed_field.index = ncomm_index
             ncomm_index += 1
         else:
             embed_field.index = comm_index
             comm_index += 1
         embed_field.dim = emb_dim
         embed_field.fea_group_id = name
         embed_field.op = dense_input_conf_pb2.KSUM
     with open('embed-dim-xdl-code-conf', 'wb') as f:  # dense_input_conf.pb
         f.write(MessageToString(embed_dim_list))
     output(output_v4_dir, 'embed-dim-xdl-code-conf')
     f2id_list = f2id_pb2.F2IdList()
     for gid in xrange(len(space_list)):
         for feature_groupid in space_list[gid]:
             f2_id = f2id_list.item.add()
             f2_id.feature_groupid = feature_groupid
             f2_id.fid = gid
     with open('embed.best.meta', 'wb') as f:  # f2id.pb
         f.write(MessageToString(f2id_list))
     output(output_v4_dir, 'embed.best.meta')
示例#17
0
 def __init__(self, missing_field, message):
     """Describe which field was missing from which message type."""
     detail = 'Could not locate {} in message {} - got {}'.format(
         missing_field,
         type(message).__name__,
         MessageToString(message, as_one_line=True))
     super(MalformedMessageError, self).__init__(detail)
async def ipc_recv():
    """Connect to the local websocket IPC bus and print each Event forever."""
    uri = 'ws://localhost:8989'
    async with websockets.connect(uri) as websocket:
        while True:
            msg = await websocket.recv()
            event = Event()
            event.ParseFromString(msg)
            print(MessageToString(event, as_one_line=True))
示例#19
0
def Memo_OPT_Inplace_Memory(original_prototxt_path, original_model_path,
                            optimized_prototxt_path):
    """Rename blobs so eligible elementwise layers compute in place.

    Single-bottom layers whose type supports in-place computation get their
    top blob renamed to their bottom blob; downstream consumers of the old
    top name are rewired through blob_pair.  Blobs produced by layers that
    cannot run in place are tracked in parameter_blob_name.
    """
    inplace_operation_type = [
        'Scale', 'BatchNorm', 'ReLU', 'PReLU', 'Softmax', 'TanH', 'ELU',
        'Dropout'
    ]
    net_param = caffe_pb2.NetParameter()
    with open(original_prototxt_path, 'rt') as f:
        Parse(f.read(), net_param)
    layer_num = len(net_param.layer)
    # Blobs produced by non-in-place layers (safe anchors for renaming onto).
    parameter_blob_name = []
    # Maps an original top-blob name -> the blob name that replaced it.
    blob_pair = {}
    for layer_idx in range(0, layer_num):
        layer = net_param.layer[layer_idx]
        if layer.type in inplace_operation_type and len(layer.bottom) == 1:
            if layer.bottom[0] in parameter_blob_name:
                if layer.bottom[0] != layer.top[0]:
                    # inplace opt
                    blob_pair[layer.top[0]] = layer.bottom[0]
                    print "MEMORY In-PLACE OPT : " + layer.name + " : Top Blob [" + layer.top[
                        0] + "] => [" + layer.bottom[0] + "]"
                    net_param.layer[layer_idx].top[0] = layer.bottom[0]
                else:
                    # optimized
                    continue
            else:
                if blob_pair.has_key(layer.bottom[0]):
                    # change bottom blob name
                    blob_pair[layer.top[0]] = blob_pair[layer.bottom[0]]
                    print "MEMORY In-PLACE OPT : " + layer.name + " : Top Blob [" + layer.top[
                        0] + "] => [" + blob_pair[layer.bottom[0]] + "]"
                    print "MEMORY In-PLACE OPT : " + layer.name + " : Bottom Blob [" + layer.bottom[
                        0] + "] => [" + blob_pair[layer.bottom[0]] + "]"
                    net_param.layer[layer_idx].top[0] = blob_pair[
                        layer.bottom[0]]
                    net_param.layer[layer_idx].bottom[0] = blob_pair[
                        layer.bottom[0]]
                else:
                    assert (
                        1 > 2
                    ), "MEMORY In-PLACE OPT : **ERROR** Should Not Reach Here. ##"
        else:
            # Non-in-place layer: its tops are new anchor blobs, and any
            # bottoms that were renamed earlier must be rewired.
            for i in range(0, len(layer.top)):
                parameter_blob_name.append(layer.top[i])
            for i in range(0, len(layer.bottom)):
                if blob_pair.has_key(layer.bottom[i]):
                    print "MEMORY In-PLACE OPT : " + layer.name + " : Bottom Blob [" + layer.bottom[
                        i] + "] => [" + blob_pair[layer.bottom[i]] + "]"
                    net_param.layer[layer_idx].bottom[i] = blob_pair[
                        layer.bottom[i]]
                else:
                    continue
    with open(optimized_prototxt_path, 'wt') as f:
        f.write(MessageToString(net_param))
    # shutil.copyfile(original_model_path, optimized_model_path)
    print "MEMORY In-PLACE OPT : In-place Memory Optimization Done."
    print bcolors.OKGREEN + "MEMORY In-PLACE OPT : Model at " + original_model_path + "." + bcolors.ENDC
    print bcolors.OKGREEN + "MEMORY In-PLACE OPT : Prototxt at " + optimized_prototxt_path + "." + bcolors.ENDC
示例#20
0
    def handle_register_acknowledgement(self, message):
        """Reconstitute the properties/output/prediction schemas carried in
        the server's register acknowledgement."""
        log.debug('Processing acknowledgement %s', MessageToString(message))

        self._properties_schema = MessageBuilder().reconstitute(
            message.properties_schema)
        self._output_schema = MessageBuilder().reconstitute(
            message.output_schema)
        self._prediction_schema = MessageBuilder().reconstitute(
            message.prediction_schema)
示例#21
0
def write_proto_to_file(proto, filename, binary=True):
    """Write a protobuf message to *filename*.

    :param proto: protobuf message to serialize.
    :param filename: destination path.
    :param binary: when True write the wire format; otherwise the text format.
    """
    if binary:
        # Context managers guarantee the handle is closed even when
        # serialization or the write raises (the original leaked on error).
        with open(filename, "wb") as f:
            f.write(proto.SerializeToString())
    else:
        with open(filename, "w") as f:
            f.write(MessageToString(proto))
示例#22
0
文件: ipc.py 项目: isherman/core
 def log_state(self):
     """Log a one-line text dump of every message in the current state."""
     assert not self._recv_raw, 'EventBus initialized with recv_raw, no state.'
     if not self._subscriptions:
         logger.warning(
             f'{self._name} is not subscribed to any eventbus traffic')
     lines = [MessageToString(value, as_one_line=True)
              for value in self._state.values()]
     logger.info('\n'.join(lines))
def save(sess, saver, global_step, config, save_folder):
    """Snapshots a model."""
    if not os.path.isdir(save_folder):
        os.makedirs(save_folder)
    # Persist the config alongside the checkpoint for reproducibility.
    with open(os.path.join(save_folder, 'config.prototxt'), 'w') as f:
        f.write(MessageToString(config))
    log.info('Saving to {}'.format(save_folder))
    checkpoint_path = os.path.join(save_folder, 'model.ckpt')
    saver.save(sess, checkpoint_path, global_step=global_step)
示例#24
0
    async def stream_wav_file(self, filename, volume=50):
        """ Plays audio using Vector's speakers.

        .. testcode::

            import anki_vector

            with anki_vector.Robot() as robot:
                robot.audio.stream_wav_file('../examples/sounds/vector_alert.wav')

        :param filename: the filename/path to the .wav audio file
        :param volume: the audio playback level (0-100)
        :raises VectorExternalAudioPlaybackException: if audio is already
            playing, the volume is out of range, or playback reports an error
        """

        # TODO make this support multiple simultaneous sound playback
        if self._is_active_event is None:
            self._is_active_event = asyncio.Event()

        if self._is_active_event.is_set():
            raise VectorExternalAudioPlaybackException(
                "Cannot start audio when another sound is playing")

        if volume < 0 or volume > 100:
            raise VectorExternalAudioPlaybackException(
                "Volume must be between 0 and 100")
        _file_reader, _file_params = self._open_file(filename)
        playback_error = None
        self._is_active_event.set()

        if self._done_event is None:
            self._done_event = asyncio.Event()

        try:
            # Stream chunks to the robot and watch the response stream for a
            # completion or error status.
            async for response in self.grpc_interface.ExternalAudioStreamPlayback(
                    self._request_handler(_file_reader, _file_params, volume)):
                self.logger.info("ExternalAudioStream %s",
                                 MessageToString(response, as_one_line=True))
                response_type = response.WhichOneof("audio_response_type")
                if response_type == 'audio_stream_playback_complete':
                    playback_error = None
                elif response_type == 'audio_stream_buffer_overrun':
                    playback_error = response_type
                # NOTE(review): 'failyer' is presumably the field name defined
                # by the proto oneof — confirm before "fixing" the spelling.
                elif response_type == 'audio_stream_playback_failyer':
                    playback_error = response_type
                self._done_event.set()
        except asyncio.CancelledError:
            self.logger.debug('Audio Stream future was cancelled.')
        except futures.CancelledError:
            self.logger.debug('Audio Stream handler task was cancelled.')
        finally:
            # Always clear the events so a later call can start fresh.
            self._is_active_event = None
            self._done_event = None

        if playback_error is not None:
            raise VectorExternalAudioPlaybackException(
                f"Error reported during audio playback {playback_error}")
示例#25
0
文件: resconv.py 项目: jj4jj/resconv
def convert(path, dstdir):
    """Convert one xlsx table to a binary .pbin and a text .txt protobuf file.

    The table type name is the part of the file's basename before the
    first underscore.

    :param path: path to the source .xlsx file.
    :param dstdir: directory receiving the .pbin and .txt outputs.
    """
    name = os.path.basename(path)
    table_type_name = name.split('_')[0]
    tb = read_table_from_xlsx(table_type_name, path)
    buff = tb.SerializeToString()
    pbinf = '%s/%s.pbin' % (dstdir, table_type_name)
    # SerializeToString yields bytes: write in binary mode so newline
    # translation cannot corrupt the payload; context managers also close
    # the handles deterministically (the original leaked both).
    with open(pbinf, "wb") as f:
        f.write(buff)
    ptxtf = '%s/%s.txt' % (dstdir, table_type_name)
    with open(ptxtf, "w") as f:
        f.write(MessageToString(tb))
    logging.debug("convert pbin path:%s -> %s,%s ...", path, pbinf, ptxtf)
示例#26
0
def log_protobuf(logger, text, message):
    """Log protobuf message and shorten line length."""
    if not logger.isEnabledFor(logging.DEBUG):
        return

    override_length = int(environ.get("PYATV_PROTOBUF_MAX_LINE", 0))
    line_length = override_length or _PROTOBUF_LINE_LENGTH

    dump = MessageToString(message, print_unknown_fields=True)
    shortened = [_shorten(line, line_length) for line in dump.splitlines()]
    logger.debug("%s: %s", text, "\n".join(shortened))
示例#27
0
def save_config(config, save_folder):
    """Saves configuration to a file."""
    if not os.path.isdir(save_folder):
        os.makedirs(save_folder)
    with open(os.path.join(save_folder, "config.prototxt"), "w") as f:
        f.write(MessageToString(config))
    # Record the exact command line once per invocation, keyed by timestamp.
    cmd_file = os.path.join(save_folder, "cmd-{}.txt".format(time.time()))
    if not os.path.exists(cmd_file):
        with open(cmd_file, "w") as f:
            f.write(' '.join(sys.argv))
示例#28
0
    def _listen_for_services(self, n_periods):
        """Alternate between listening for and resting from announcements.

        While the receive socket is closed, count quiet periods and reopen
        it on every third call; while it is open, close it and drop any
        service that has not re-announced within the last 10 seconds.

        NOTE(review): n_periods is unused here — presumably required by the
        caller's callback signature; confirm.
        """
        if self._mc_recv_sock is None:
            self._quiet_count += 1
            if self._quiet_count == 3:
                self._connect_recv_sock()
                logger.info('Listening for services.')
                self._quiet_count = 0
        else:
            self._close_recv_sock()
            logger.info('Resting for services.')
            delete = []
            for key, service in self._services.items():
                # Stale if no announcement was received in the last 10s.
                if (time.time() - service.recv_stamp.ToSeconds()) > 10:
                    logger.info('Dropping service: %s', MessageToString(service, as_one_line=True))
                    delete.append(key)
                else:
                    logger.info('Active service  : %s', MessageToString(service, as_one_line=True))

            for key in delete:
                del self._services[key]
示例#29
0
 def printMessage(self, status, msg):
     """Print the status line followed by the message body, rendered as
     JSON or protobuf text depending on the output options."""
     self.printStatus(status)
     if self.opts.output_json:
         result = MessageToJson(msg)
     else:
         result = MessageToString(
             msg,
             pointy_brackets=True,
             as_utf8=True,
         ).rstrip()
     info(result)
示例#30
0
def SendQuoteReplace(submitter, attrs):
    """Interactively build and submit a QuoteReplace, then await the response.

    Prompts for buy/sell quantity and price, sends a replace for the quote
    identified in *attrs*, and waits for the matching QuoteResponse.
    Updates the module globals `responses` and `soid` as a side effect.

    Returns True when a QuoteResponse was received, False otherwise.
    """
    global responses, soid
    quote_id = GetQuoteId(attrs)

    buy_qty = raw_input("Buy Qty=> ")
    sell_qty = raw_input("Sell Qty=> ")
    buy_prc = raw_input("Buy Price=> ")
    sell_prc = raw_input("Sell Price=> ")

    quote_side_1=[
               {'qty':int(buy_qty),'side':1,'price':int(buy_prc)}, #orginal=32860
               {'qty':int(sell_qty),'side':2,'price':int(sell_prc)}  #orginal=32870
                 ]
    attrs = {
                 'instrument_id':2057683400770929841, #MCHZ7
                 'quote_side':quote_side_1,
                 'ord_type':enums.ORD_TYPE_LIMIT,
                 'time_in_force':enums.TIME_IN_FORCE_DAY,
                 'market_id':enums.TT_MARKET_ID_HKEX,
                 'connection_id': connection_id,
                 'user_id': user_id,
                 'account_id': account_id,
                 'account':account,
                 'quote_id':quote_id,
                 'source':enums.SOURCE_PYRATE,
                 'appl_id':PYRATE_APPLICATION_ID,
                 'aggressor_indicator':True,
                 'secondary_order_id': soid
            }
# Submit a replace quote
    msg = dict_to_protobuf(attrs, QuoteReplace)
    print '\nSent QuoteCancel {}\n'.format(quote_id, )  # submitter.send_topic)
#    print '\nSent QuoteReplace {} on {}\n'.format(quote_id, )  # submitter.send_topic)
    print msg
    submitter.send(msg)

# Wait for a response
    try:
        msgs = submitter.wait_for_response(Header.MSG_QUOTE_RESPONSE, wait_topic)
        print '\nQuoteResponse for QuoteReplace received:\n'
        responses = responses + submitter.order_responses[quote_id]

        # Remember the secondary order id of the latest response for the
        # next replace/cancel.
        soid = submitter.order_responses[quote_id][-1].order_response.secondary_order_id
        print MessageToString(submitter.order_responses[quote_id][-1].order_response)

        return True

    # NOTE(review): bare except hides the real failure (timeout vs. KeyError
    # on quote_id) — consider narrowing and logging the exception.
    except:
        print 'Did not receive a QuoteReplace QuoteResponse from the OC, make sure its up!!'
        return False