Example #1
def upgrade_op(nf, in_n):
    if in_n.op_type == 'Slice' and len(in_n.input) == 1:
        # convert opset9 Slice to opset10
        with nf.scoped_prefix(in_n.name) as scoped_prefix:
            slice_inputs = [
                in_n.input[0],
                np.asarray(NodeFactory.get_attribute(in_n, 'starts')).astype(np.int64),
                np.asarray(NodeFactory.get_attribute(in_n, 'ends')).astype(np.int64),
                np.asarray(NodeFactory.get_attribute(in_n, 'axes')).astype(np.int64)
            ]
            nf.make_node('Slice', slice_inputs, output_names=list(in_n.output))
        return True
    elif in_n.op_type == 'TopK' and len(in_n.input) == 1:
        # convert opset1 TopK to opset10
        with nf.scoped_prefix(in_n.name) as scoped_prefix:
            topk_inputs = [
                in_n.input[0],
                np.asarray([NodeFactory.get_attribute(in_n, 'k')]).astype(np.int64)
            ]
            nf.make_node('TopK',
                         topk_inputs,
                         {'axis': NodeFactory.get_attribute(in_n, 'axis', -1)},
                         output_names=list(in_n.output))
        return True
    else:
        return False
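A minimal usage sketch, mirroring the copy-then-rewrite pattern of Example #14 below (the input file name is hypothetical): nodes that upgrade_op handles are emitted into the output graph through the factory, and everything else is copied verbatim.

import onnx

in_mp = onnx.load('model.onnx')  # hypothetical input path
out_mp = onnx.ModelProto()
out_mp.CopyFrom(in_mp)
out_mp.graph.ClearField('node')
nf = NodeFactory(out_mp.graph)
for in_n in in_mp.graph.node:
    if upgrade_op(nf, in_n):
        continue  # node was rewritten into the output graph by upgrade_op
    out_n = out_mp.graph.node.add()
    out_n.CopyFrom(in_n)  # unchanged nodes are copied as-is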
Example #2
    def setup(self):
        if not self.is_source_built():
            print('Telos source either doesn\'t exist, or isn\'t initialized.')
            exit(2)

        self.wallet = Wallet(self.wallet_dir, self.keosd_dir)
        self.node_factory = NodeFactory(self.start_cwd, self.parent_dir, self.nodeos_dir, self.wallet)
        self.account_factory = AccountFactory(self.wallet, self.teclos_dir)
        self.boot_strapper = BootStrapper(self.telos_dir, self.teclos_dir, self.host_address, self.account_factory)
Example #3
class Grow:

    def __init__(self):
        self.parent_dir = os.path.abspath(join(os.path.realpath(__file__), os.pardir))
        self.jsonConfig = json.loads(file_get_contents(join(self.parent_dir, "config/state.json")))

        self.start_cwd = os.getcwd()
        self.contracts_dir = "build/contracts"
        self.wallet_dir = join(self.parent_dir, 'wallet')
        # use the configured address only when it is non-empty; otherwise fall back to the local default
        self.host_address = self.jsonConfig['host_address'] \
            if 'host_address' in self.jsonConfig and self.jsonConfig['host_address'] != "" \
            else "http://127.0.0.1:8888"

        self.git_tag = ''
        self.telos_dir = os.path.abspath('telos')
        if 'src-dir' in self.jsonConfig and self.jsonConfig['src-dir'] != '':
            self.telos_dir = os.path.abspath(self.jsonConfig['src-dir'])
        self.keosd_dir = join(self.telos_dir, "build/programs/tkeosd/tkeosd")
        self.teclos_dir = join(self.telos_dir, "build/programs/teclos/teclos")
        self.nodeos_dir = join(self.telos_dir, "build/programs/nodeos/nodeos")
        self.initializer = Initializer(self.telos_dir, self.start_cwd)

    def setup(self):
        if not self.is_source_built():
            print('Telos source either doesn\'t exist, or isn\'t initialized.')
            exit(2)

        self.wallet = Wallet(self.wallet_dir, self.keosd_dir)
        self.node_factory = NodeFactory(self.start_cwd, self.parent_dir, self.nodeos_dir, self.wallet)
        self.account_factory = AccountFactory(self.wallet, self.teclos_dir)
        self.boot_strapper = BootStrapper(self.telos_dir, self.teclos_dir, self.host_address, self.account_factory)

    def get_source_path(self):
        return self.telos_dir

    def set_source_path(self, path):
        self.jsonConfig['src-dir'] = os.path.abspath(path)
        self.save()

    def source_exists(self):
        return os.path.isdir(self.telos_dir)

    def is_source_built(self):
        return self.source_exists() and os.path.isdir(join(self.telos_dir, 'build'))

    def set_host_address(self, address):
        self.host_address = address

    def save(self):
        if hasattr(self, 'node_factory'):
            self.node_factory.save()
        self.jsonConfig['host_address'] = self.host_address
        create_file(join(self.parent_dir, 'config/state.json'), json.dumps(self.jsonConfig, sort_keys=True, indent=4))

    @staticmethod
    def get_chain_id():
        j = json.loads(get_output('teclos get info'))
        return j['chain-id']
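For reference, an illustrative config/state.json that __init__ above can read; the keys are the ones the class accesses, the values are hypothetical:

{
    "host_address": "http://127.0.0.1:8888",
    "src-dir": "/home/user/telos"
}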
Example #4
def upgrade_slice_op(nf, in_n):
    # convert opset9 Slice to opset10
    assert len(in_n.input) == 1
    with nf.scoped_prefix(in_n.name) as scoped_prefix:
        slice_inputs = [
            in_n.input[0],
            np.asarray(NodeFactory.get_attribute(in_n, 'starts')).astype(np.int64),
            np.asarray(NodeFactory.get_attribute(in_n, 'ends')).astype(np.int64),
            np.asarray(NodeFactory.get_attribute(in_n, 'axes')).astype(np.int64)
        ]
        nf.make_node('Slice', slice_inputs, output_names=[in_n.output[0]])
Example #5
def _append_initializer_from_graph(graph):
    initializers = [i.name for i in graph.initializer]
    for node in graph.node:
        if node.op_type == 'Scan':  # currently only handle Scan
            subgraph = NodeFactory.get_attribute(node, 'body')
            initializers += _append_initializer_from_graph(subgraph)
    return initializers
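A brief usage sketch (hypothetical model path): the helper returns the initializer names of the top-level graph plus those of every nested Scan body.

import onnx

model = onnx.load('model.onnx')  # hypothetical path
all_initializer_names = _append_initializer_from_graph(model.graph)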
Example #6
    def _find_node_templates(self):
        node_tpls_root_dict = self.tpl_dict['topology_template']['node_templates']

        for node_name in node_tpls_root_dict.keys():
            node_type = node_tpls_root_dict[node_name]['type']
            print( "Found node template %s of type %s" % (node_name, node_type) )
            #if ("tosca.nodes.nfv.VNF" in node_type):
            #    # TODO: verify
            #    self.topology_templates[node_name] = TopologyTemplate(SubstitutionMappings.getSubTemplate(node_type))
            #else:
            self.node_templates[node_name] = NodeFactory.getNodeTemplate(node_name, node_type, node_tpls_root_dict[node_name])
Example #7
    def _find_node_templates(self):
        node_tpls_root_dict = self.tpl_dict['topology_template'][
            'node_templates']

        for node_name in node_tpls_root_dict.keys():
            node_type = node_tpls_root_dict[node_name]['type']
            print("Found node template %s of type %s" % (node_name, node_type))
            #if ("tosca.nodes.nfv.VNF" in node_type):
            #    # TODO: verify
            #    self.topology_templates[node_name] = TopologyTemplate(SubstitutionMappings.getSubTemplate(node_type))
            #else:
            self.node_templates[node_name] = NodeFactory.getNodeTemplate(
                node_name, node_type, node_tpls_root_dict[node_name])
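An illustrative sketch of the minimal tpl_dict shape this method expects; the node name and type are hypothetical placeholders:

tpl_dict = {
    'topology_template': {
        'node_templates': {
            'vdu1': {'type': 'tosca.nodes.Compute'}  # hypothetical node template
        }
    }
}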
Example #8
def handle_common_attributes(node, default_activations):
    direction = NodeFactory.get_attribute(node, 'direction')
    if direction:
        direction = str(direction, 'utf-8')
    else:
        direction = 'forward'
    num_directions = 2 if direction == 'bidirectional' else 1

    activations = NodeFactory.get_attribute(node, 'activations')
    if activations:
        activations = [
            str(x, 'utf-8').lower().capitalize() for x in activations
        ]
    else:
        activations = default_activations * num_directions

    activation_alpha = NodeFactory.get_attribute(node, 'activation_alpha')
    activation_beta = NodeFactory.get_attribute(node, 'activation_beta')
    clip_threshold = NodeFactory.get_attribute(node, 'clip')
    # TODO: support these activation attributes
    assert not activation_alpha
    assert not activation_beta
    assert not clip_threshold
    return direction, num_directions, activations
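To illustrate the activation handling: when a bidirectional node carries no explicit activations attribute, the per-direction defaults are simply repeated. A small sketch using the LSTM defaults passed in Example #12:

default_activations = ['Sigmoid', 'Tanh', 'Tanh']  # LSTM defaults from convert_lstm_to_scan
num_directions = 2  # direction == 'bidirectional'
activations = default_activations * num_directions
# -> ['Sigmoid', 'Tanh', 'Tanh', 'Sigmoid', 'Tanh', 'Tanh']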
Example #9
    def setup(self):

        if not self.isCdt():
            print('Grow requires that eosio.cdt be installed')
            print('Please install the cdt and try again')
            exit(2)

        if not self.isNodeos():
            print('Grow requires nodeos to be installed and in the path variable')
            exit(2)

        if not self.isCleos():
            print('Grow requires cleos to be installed and in the path variable')
            exit(2)

        if not self.isKeosd():
            print('Grow requires keosd to be installed and in the path variable')
            exit(2)

        if not os.path.exists(self.contracts_dir):
            print('eosio.contracts source either doesn\'t exist, or isn\'t initialized')
            exit(2)

        self.wallet = Wallet(self.wallet_dir, self.cleos, self.keosd, 10000)
        self.node_factory = NodeFactory(self.start_cwd, self.parent_dir,
                                        self.nodeos, self.wallet)
        self.account_factory = AccountFactory(self.wallet, self.cleos)
        self.boot_strapper = BootStrapper(self.parent_dir, self.contracts_dir,
                                          self.cleos, self.host_address,
                                          self.account_factory)
Example #10
def convert_rnn_to_scan(node, out_main_graph):
    assert node.op_type == 'RNN'
    nf = NodeFactory(out_main_graph)
    with nf.scoped_prefix(node.output[0]) as scoped_prefix:
        X = node.input[0]
        Wa = nf.get_initializer(node.input[1])
        Ra = nf.get_initializer(node.input[2])
        num_inputs = len(node.input)
        Ba = nf.get_initializer(node.input[3]) if num_inputs > 3 else None
        seq_len = node.input[4] if num_inputs > 4 else None
        InitHa = node.input[5] if num_inputs > 5 else None

        direction, num_directions, activations = handle_common_attributes(
            node, ['Tanh'])

        hidden_size = NodeFactory.get_attribute(node, 'hidden_size')

        InitHa = handle_init_state(InitHa, nf, num_directions)

        batch_size, batch_node = handle_batch_size(X, nf, InitHa is None)
        if InitHa is None:
            zero_init_state = default_init_state(X, batch_size, batch_node,
                                                 hidden_size, nf)

        scan_outputs = []
        scan_h_outputs = []
        for direction_index in range(num_directions):
            # for each direction
            # X [seq_len, batch_size, input_size]
            # W [hidden_size, input_size]
            # R [hidden_size, hidden_size]
            # B [2*hidden_size]
            # seq_len [batch_size]
            # init_h [batch_size, hidden_size]

            name_prefix = node.output[0] + '_' + str(direction_index) + '_'

            if InitHa is None:
                init_h = zero_init_state
            else:
                init_h = InitHa[direction_index]

            input_size = Wa.shape[-1]
            W_t = np.transpose(Wa[direction_index])  # [input_size, hidden_size]
            R_t = np.transpose(Ra[direction_index])  # [hidden_size, hidden_size]
            B = Ba[direction_index].reshape(2, hidden_size).sum(axis=0)  # [hidden_size]
            # [seq_len, batch_size, hidden_size]
            X_proj = nf.make_node('Add', [nf.make_node('MatMul', [X, W_t]), B])
            if num_directions == 1:
                is_backward = 0 if direction == 'forward' else 1
            else:
                is_backward = direction_index

            scan_body = onnx.GraphProto()
            scan_body.name = name_prefix + '_subgraph'

            nf_body = NodeFactory(out_main_graph, scan_body)
            with nf_body.scoped_prefix(name_prefix) as body_scoped_prefix:
                # subgraph inputs
                X_proj_subgraph = X_proj.name + '_subgraph'
                prev_h_subgraph = name_prefix + '_h_subgraph'

                seq_len_subgraph = declare_seq_len_in_subgraph(
                    seq_len, nf_body, X_proj.name, batch_size)

                nf_body.make_value_info(prev_h_subgraph,
                                        data_type=onnx.TensorProto.FLOAT,
                                        shape=(batch_size, hidden_size),
                                        usage=NodeFactory.ValueInfoType.input)

                nf_body.make_value_info(X_proj_subgraph,
                                        data_type=onnx.TensorProto.FLOAT,
                                        shape=(batch_size, hidden_size),
                                        usage=NodeFactory.ValueInfoType.input)
                # subgraph nodes
                # Ht = f(Xt*(W^T) + Ht-1*(R^T) + Wb + Rb)

                activation_f = activations[direction_index]
                Ht = nf_body.make_node(
                    activation_f,
                    nf_body.make_node('Add', [
                        nf_body.make_node('MatMul', [prev_h_subgraph, R_t]),
                        X_proj_subgraph
                    ]))

                subgraph_outputs = handle_subgraph_outputs(
                    nf_body, seq_len_subgraph, batch_size, hidden_size,
                    [(Ht, prev_h_subgraph)] + ([
                        (Ht, np.zeros(shape=(), dtype=np.float32))
                    ] if node.output[0] else []))

                scan = nf.make_node(
                    'Scan', ([seq_len] if seq_len else []) + [init_h, X_proj],
                    {
                        'body': scan_body,
                        'scan_input_directions': [is_backward],
                        'scan_output_directions': [is_backward],
                        'num_scan_inputs': 1
                    },
                    output_names=[
                        o.name
                        for o in subgraph_outputs[(0 if seq_len else 1):]
                    ])

                scan_h_outputs.append(subgraph_outputs[1])
                if node.output[0]:
                    scan_outputs.append(subgraph_outputs[2])

        handle_final_scan_outputs(node, nf, scan_outputs, [scan_h_outputs],
                                  num_directions)

    # remove old initializers
    nf.remove_initializer(node.input[1])
    nf.remove_initializer(node.input[2])
    if num_inputs > 3:
        nf.remove_initializer(node.input[3])
    if num_inputs > 5:
        nf.remove_initializer(node.input[5])
    return True
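For intuition, a NumPy reference sketch (an illustration, not part of the converter) of the recurrence the Scan body computes; note the input projection is hoisted out of the time loop exactly as X_proj is above:

import numpy as np

def rnn_reference(X, W, R, B, h0):
    # X: [seq_len, batch, input_size], W: [hidden, input], R: [hidden, hidden], B: [2*hidden]
    hidden_size = W.shape[0]
    Wb, Rb = B[:hidden_size], B[hidden_size:]
    X_proj = X @ W.T + (Wb + Rb)  # hoisted out of the loop, as in the code above
    h = h0
    outputs = []
    for t in range(X.shape[0]):   # this time loop is what the Scan node expresses
        h = np.tanh(X_proj[t] + h @ R.T)  # Ht = f(Xt*W^T + Ht-1*R^T + Wb + Rb), f = Tanh
        outputs.append(h)
    return np.stack(outputs), h   # per-step outputs and final hidden state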
Example #11
def convert_gru_to_scan(node, out_main_graph):
    assert node.op_type == 'GRU'
    nf = NodeFactory(out_main_graph)
    with nf.scoped_prefix(node.output[0]) as scoped_prefix:
        X = node.input[0]
        Wa = nf.get_initializer(node.input[1])
        Ra = nf.get_initializer(node.input[2])
        num_inputs = len(node.input)
        Ba = nf.get_initializer(node.input[3]) if num_inputs > 3 else None
        seq_len = node.input[4] if num_inputs > 4 else None
        InitHa = node.input[5] if num_inputs > 5 else None

        direction, num_directions, activations = handle_common_attributes(
            node, ['Sigmoid', 'Tanh'])

        hidden_size = NodeFactory.get_attribute(node, 'hidden_size')
        linear_before_reset = NodeFactory.get_attribute(
            node, 'linear_before_reset')
        InitHa = handle_init_state(InitHa, nf, num_directions)

        batch_size, batch_node = handle_batch_size(X, nf, InitHa is None)
        if InitHa is None:
            zero_init_state = default_init_state(X, batch_size, batch_node,
                                                 hidden_size, nf)

        scan_outputs = []
        scan_h_outputs = []
        for direction_index in range(num_directions):
            # for each direction
            # X [seq_len, batch_size, input_size]
            # W [3*hidden_size, input_size]
            # R [3*hidden_size, hidden_size]
            # B [6*hidden_size]
            # seq_len [batch_size]
            # init_h [batch_size, hidden_size]

            name_prefix = node.output[0] + '_' + str(direction_index) + '_'

            if InitHa is None:
                init_h = zero_init_state
            else:
                init_h = InitHa[direction_index]

            input_size = Wa.shape[-1]
            W_t = np.transpose(Wa[direction_index])  # [input_size, 3*hidden_size]
            R_t = np.transpose(Ra[direction_index])  # [hidden_size, 3*hidden_size]
            # [hidden_size, 2*hidden_size] and [hidden_size, hidden_size]
            Rzr_t, Rh_t = np.hsplit(R_t, [2 * hidden_size])
            Bzr, Bh = np.hsplit(Ba[direction_index].reshape(2, 3 * hidden_size),
                                [2 * hidden_size])
            Bzr = Bzr.sum(axis=0)  # [2*hidden_size]
            Wbh = Bh[0]
            Rbh = Bh[1]
            # [seq_len, batch_size, 3*hidden_size]
            X_proj = nf.make_node('Add', [nf.make_node('MatMul', [X, W_t]),
                                          np.concatenate((Bzr, Wbh))])
            if num_directions == 1:
                is_backward = 0 if direction == 'forward' else 1
            else:
                is_backward = direction_index

            scan_body = onnx.GraphProto()
            scan_body.name = name_prefix + '_subgraph'

            nf_body = NodeFactory(out_main_graph, scan_body)
            with nf_body.scoped_prefix(name_prefix) as body_scoped_prefix:
                # subgraph inputs
                X_proj_subgraph = X_proj.name + '_subgraph'
                prev_h_subgraph = name_prefix + '_h_subgraph'

                seq_len_subgraph = declare_seq_len_in_subgraph(
                    seq_len, nf_body, X_proj.name, batch_size)

                nf_body.make_value_info(prev_h_subgraph,
                                        data_type=onnx.TensorProto.FLOAT,
                                        shape=(batch_size, hidden_size),
                                        usage=NodeFactory.ValueInfoType.input)

                nf_body.make_value_info(X_proj_subgraph,
                                        data_type=onnx.TensorProto.FLOAT,
                                        shape=(batch_size, 3 * hidden_size),
                                        usage=NodeFactory.ValueInfoType.input)

                # subgraph nodes
                # zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)
                # rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)
                # ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0
                # ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0
                # Ht = (1 - zt) (.) ht + zt (.) Ht-1

                split_X_outputs = ['split_Xzr', 'split_Xh']
                nf_body.make_node('Split',
                                  X_proj_subgraph, {
                                      "axis": 1,
                                      "split": [2 * hidden_size, hidden_size]
                                  },
                                  output_names=split_X_outputs)
                nf_body.make_value_info('split_Xzr',
                                        data_type=onnx.TensorProto.FLOAT,
                                        shape=(batch_size, 2 * hidden_size))
                nf_body.make_value_info('split_Xh',
                                        data_type=onnx.TensorProto.FLOAT,
                                        shape=(batch_size, hidden_size))

                activation_f, activation_g = \
                    activations[direction_index * 2:(direction_index + 1) * 2]

                if linear_before_reset:
                    prev_h_proj = nf_body.make_node('Add', [
                        nf_body.make_node('MatMul', [prev_h_subgraph, R_t]),
                        np.concatenate((np.zeros(2 * hidden_size).astype(
                            np.float32), Rbh))
                    ])
                    split_prev_h_outputs = ['split_Hzr', 'split_Hh']
                    nf_body.make_node(
                        'Split',
                        prev_h_proj, {
                            "axis": 1,
                            "split": [2 * hidden_size, hidden_size]
                        },
                        output_names=split_prev_h_outputs)
                    nf_body.make_value_info('split_Hzr',
                                            data_type=onnx.TensorProto.FLOAT,
                                            shape=(batch_size,
                                                   2 * hidden_size))
                    nf_body.make_value_info('split_Hh',
                                            data_type=onnx.TensorProto.FLOAT,
                                            shape=(batch_size, hidden_size))
                    ztrt = nf_body.make_node(
                        activation_f,
                        nf_body.make_node('Add', ['split_Hzr', 'split_Xzr']))
                    split_ztrt_outputs = ['split_zt', 'split_rt']
                    nf_body.make_node('Split',
                                      ztrt, {
                                          "axis": 1,
                                          "split": [hidden_size, hidden_size]
                                      },
                                      output_names=split_ztrt_outputs)
                    nf_body.make_value_info('split_zt',
                                            data_type=onnx.TensorProto.FLOAT,
                                            shape=(batch_size, hidden_size))
                    nf_body.make_value_info('split_rt',
                                            data_type=onnx.TensorProto.FLOAT,
                                            shape=(batch_size, hidden_size))
                    ht = nf_body.make_node(
                        activation_g,
                        nf_body.make_node('Add', [
                            nf_body.make_node('Mul', ['split_rt', 'split_Hh']),
                            'split_Xh'
                        ]))
                else:
                    ztrt = nf_body.make_node(
                        activation_f,
                        nf_body.make_node('Add', [
                            nf_body.make_node('MatMul',
                                              [prev_h_subgraph, Rzr_t]),
                            'split_Xzr'
                        ]))
                    split_ztrt_outputs = ['split_zt', 'split_rt']
                    nf_body.make_node('Split',
                                      ztrt, {
                                          "axis": 1,
                                          "split": [hidden_size, hidden_size]
                                      },
                                      output_names=split_ztrt_outputs)
                    nf_body.make_value_info('split_zt',
                                            data_type=onnx.TensorProto.FLOAT,
                                            shape=(batch_size, hidden_size))
                    nf_body.make_value_info('split_rt',
                                            data_type=onnx.TensorProto.FLOAT,
                                            shape=(batch_size, hidden_size))
                    ht = nf_body.make_node(
                        activation_g,
                        nf_body.make_node('Add', [
                            nf_body.make_node('MatMul', [
                                nf_body.make_node(
                                    'Mul', [prev_h_subgraph, 'split_rt']), Rh_t
                            ]), 'split_Xh'
                        ]))

                Ht = nf_body.make_node('Add', [
                    nf_body.make_node('Mul', [
                        nf_body.make_node('Sub', [
                            np.asarray([1]).astype(np.float32), 'split_zt'
                        ]), ht
                    ]),
                    nf_body.make_node('Mul', ['split_zt', prev_h_subgraph])
                ])

                subgraph_outputs = handle_subgraph_outputs(
                    nf_body, seq_len_subgraph, batch_size, hidden_size,
                    [(Ht, prev_h_subgraph)] + ([
                        (Ht, np.zeros(shape=(), dtype=np.float32))
                    ] if node.output[0] else []))

                scan = nf.make_node(
                    'Scan', ([seq_len] if seq_len else []) + [init_h, X_proj],
                    {
                        'body': scan_body,
                        'scan_input_directions': [is_backward],
                        'scan_output_directions': [is_backward],
                        'num_scan_inputs': 1
                    },
                    output_names=[
                        o.name
                        for o in subgraph_outputs[(0 if seq_len else 1):]
                    ])

                scan_h_outputs.append(subgraph_outputs[1])
                if node.output[0]:
                    scan_outputs.append(subgraph_outputs[2])

        handle_final_scan_outputs(node, nf, scan_outputs, [scan_h_outputs],
                                  num_directions)

    # remove old initializers
    nf.remove_initializer(node.input[1])
    nf.remove_initializer(node.input[2])
    if num_inputs > 3:
        nf.remove_initializer(node.input[3])
    if num_inputs > 5:
        nf.remove_initializer(node.input[5], allow_empty=True)
    return True
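As a cross-check, a NumPy sketch (illustration only) of one default GRU step (linear_before_reset = 0), following the equations in the comments above and ONNX's z, r, h gate order:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_cell_reference(xt, h_prev, W, R, B):
    # xt: [batch, input_size], h_prev: [batch, hidden]
    # W: [3*hidden, input], R: [3*hidden, hidden], B: [6*hidden] laid out as [Wb; Rb]
    Wz, Wr, Wh = np.vsplit(W, 3)
    Rz, Rr, Rh = np.vsplit(R, 3)
    Wbz, Wbr, Wbh, Rbz, Rbr, Rbh = np.split(B, 6)
    zt = sigmoid(xt @ Wz.T + h_prev @ Rz.T + Wbz + Rbz)         # update gate
    rt = sigmoid(xt @ Wr.T + h_prev @ Rr.T + Wbr + Rbr)         # reset gate
    ht = np.tanh(xt @ Wh.T + (rt * h_prev) @ Rh.T + Wbh + Rbh)  # candidate state
    return (1 - zt) * ht + zt * h_prev                          # Ht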
Example #12
def convert_lstm_to_scan(node, out_main_graph):
    assert node.op_type == 'LSTM'
    nf = NodeFactory(out_main_graph)
    with nf.scoped_prefix(node.output[0]) as scoped_prefix:
        X = node.input[0]
        Wa = nf.get_initializer(node.input[1])
        Ra = nf.get_initializer(node.input[2])
        num_inputs = len(node.input)
        Ba = nf.get_initializer(node.input[3]) if num_inputs > 3 else None
        seq_len = node.input[4] if num_inputs > 4 else None
        InitHa = node.input[5] if num_inputs > 5 else None
        InitCa = node.input[6] if num_inputs > 6 else None
        PB = node.input[7] if num_inputs > 7 else None

        # TODO: support peephole
        assert not PB

        direction, num_directions, activations = handle_common_attributes(
            node, ['Sigmoid', 'Tanh', 'Tanh'])

        hidden_size = NodeFactory.get_attribute(node, 'hidden_size')
        input_forget = NodeFactory.get_attribute(node, 'input_forget')

        # TODO: implement input_forget = 1
        assert not (input_forget is not None and input_forget == 1)

        # split initializer if needed:
        is_same_init = InitHa == InitCa
        InitHa = handle_init_state(InitHa, nf, num_directions)
        if is_same_init:
            InitCa = InitHa
        else:
            InitCa = handle_init_state(InitCa, nf, num_directions)

        batch_size, batch_node = handle_batch_size(
            X, nf, InitHa is None or InitCa is None)

        scan_outputs = []
        scan_h_outputs = []
        scan_c_outputs = []
        for direction_index in range(num_directions):
            # for each direction
            # X [seq_len, batch_size, input_size]
            # W [4*hidden_size, input_size]
            # R [4*hidden_size, hidden_size]
            # B [8*hidden_size]
            # seq_len [batch_size]
            # init_h [batch_size, hidden_size]
            # init_c [batch_size, hidden_size]
            # PB [3*hidden_size]

            name_prefix = node.output[0] + '_' + str(direction_index) + '_'

            if InitHa is None:
                init_h = default_init_state(X, batch_size, batch_node,
                                            hidden_size, nf, '_H')
            else:
                init_h = InitHa[direction_index]

            if InitCa is None:
                init_c = default_init_state(X, batch_size, batch_node,
                                            hidden_size, nf, '_C')
            else:
                init_c = InitCa[direction_index]

            input_size = Wa.shape[-1]
            Wt = np.transpose(Wa[direction_index])
            Rt = np.transpose(Ra[direction_index])
            B = Ba[direction_index].reshape(2, 4 * hidden_size).sum(axis=0)  # [4*hidden_size]
            X_proj = nf.make_node('MatMul', [X, Wt])  # [seq_len, batch_size, 4*hidden_size]
            X_proj = nf.make_node('Add', [X_proj, B])
            if num_directions == 1:
                is_backward = 0 if direction == 'forward' else 1
            else:
                is_backward = direction_index

            scan_body = onnx.GraphProto()
            scan_body.name = name_prefix + '_subgraph'

            nf_body = NodeFactory(out_main_graph, scan_body)
            with nf_body.scoped_prefix(name_prefix) as body_scoped_prefix:
                # subgraph inputs
                X_proj_subgraph = X_proj.name + '_subgraph'
                prev_h_subgraph = name_prefix + '_h_subgraph'
                prev_c_subgraph = name_prefix + '_c_subgraph'

                seq_len_subgraph = declare_seq_len_in_subgraph(
                    seq_len, nf_body, X_proj.name, batch_size)

                for subgraph_i in [prev_h_subgraph, prev_c_subgraph]:
                    nf_body.make_value_info(
                        subgraph_i,
                        data_type=onnx.TensorProto.FLOAT,
                        shape=(batch_size, hidden_size),
                        usage=NodeFactory.ValueInfoType.input)

                nf_body.make_value_info(X_proj_subgraph,
                                        data_type=onnx.TensorProto.FLOAT,
                                        shape=(batch_size, 4 * hidden_size),
                                        usage=NodeFactory.ValueInfoType.input)
                # subgraph nodes
                # it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)
                # ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)
                # ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)
                # Ct = ft (.) Ct-1 + it (.) ct
                # ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)
                # Ht = ot (.) h(Ct)
                prev_h_proj = nf_body.make_node('MatMul',
                                                [prev_h_subgraph, Rt])
                sum_x_proj_h_proj_bias = nf_body.make_node(
                    'Add', [X_proj_subgraph, prev_h_proj])
                split_outputs = ['split_i', 'split_o', 'split_f', 'split_c']
                nf_body.make_node('Split',
                                  sum_x_proj_h_proj_bias, {
                                      "axis": 1,
                                      "split": [hidden_size] * 4
                                  },
                                  output_names=split_outputs)
                # manually add shape inference to split outputs
                for split_o in split_outputs:
                    nf_body.make_value_info(split_o,
                                            data_type=onnx.TensorProto.FLOAT,
                                            shape=(batch_size, hidden_size))
                activation_f, activation_g, activation_h = activations[
                    direction_index * 3:(direction_index + 1) * 3]
                it = nf_body.make_node(activation_f, 'split_i')
                ft = nf_body.make_node(activation_f, 'split_f')
                ct = nf_body.make_node(activation_g, 'split_c')
                c_subgraph = nf_body.make_node('Add', [
                    nf_body.make_node('Mul', [ft, prev_c_subgraph]),
                    nf_body.make_node('Mul', [it, ct])
                ])
                ot = nf_body.make_node(activation_f, 'split_o')
                h_subgraph = nf_body.make_node(
                    'Mul',
                    [ot, nf_body.make_node(activation_h, c_subgraph)])

                subgraph_outputs = handle_subgraph_outputs(
                    nf_body, seq_len_subgraph, batch_size, hidden_size,
                    [(h_subgraph, prev_h_subgraph),
                     (c_subgraph, prev_c_subgraph)] +
                    ([(h_subgraph,
                       np.zeros(shape=(), dtype=np.float32))] if node.output[0]
                     else []))  # skip scan output if node.output[0] is empty

                scan = nf.make_node(
                    'Scan',
                    ([seq_len] if seq_len else []) + [init_h, init_c, X_proj],
                    {
                        'body': scan_body,
                        'scan_input_directions': [is_backward],
                        'scan_output_directions': [is_backward],
                        'num_scan_inputs': 1
                    },
                    output_names=[
                        o.name
                        for o in subgraph_outputs[(0 if seq_len else 1):]
                    ])

                scan_h_outputs.append(subgraph_outputs[1])
                scan_c_outputs.append(subgraph_outputs[2])
                if node.output[0]:
                    scan_outputs.append(subgraph_outputs[3])

        handle_final_scan_outputs(node, nf, scan_outputs,
                                  [scan_h_outputs, scan_c_outputs],
                                  num_directions)

    # remove old initializers
    nf.remove_initializer(node.input[1])
    nf.remove_initializer(node.input[2])
    if num_inputs > 3:
        nf.remove_initializer(node.input[3])
    if num_inputs > 5:
        nf.remove_initializer(node.input[5], allow_empty=True)
    if num_inputs > 6:
        nf.remove_initializer(node.input[6], allow_empty=True)
    return True
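And the matching NumPy sketch (illustration only) of one LSTM step as computed in the Scan body: x_proj_t already contains Xt*W^T + Wb + Rb, and the gates follow the ONNX i, o, f, c order used by split_outputs above:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_cell_reference(x_proj_t, h_prev, c_prev, R):
    # x_proj_t: [batch, 4*hidden], h_prev/c_prev: [batch, hidden], R: [4*hidden, hidden]
    proj = x_proj_t + h_prev @ R.T              # Xt*W^T + Ht-1*R^T + Wb + Rb
    i, o, f, c = np.split(proj, 4, axis=-1)     # ONNX gate order: i, o, f, c
    it, ot, ft, ct = sigmoid(i), sigmoid(o), sigmoid(f), np.tanh(c)
    c_new = ft * c_prev + it * ct               # Ct = ft (.) Ct-1 + it (.) ct
    h_new = ot * np.tanh(c_new)                 # Ht = ot (.) h(Ct)
    return h_new, c_new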
Example #13
class Grow:
    def __init__(self):
        self.parent_dir = os.path.abspath(
            join(os.path.realpath(__file__), os.pardir))
        self.jsonConfig = json.loads(
            file_get_contents(join(self.parent_dir, "config/state.json")))

        self.start_cwd = os.getcwd()
        self.wallet_dir = join(self.parent_dir, 'wallet')
        # use the configured address only when it is non-empty; otherwise fall back to the local default
        self.host_address = self.jsonConfig['host_address'] \
            if 'host_address' in self.jsonConfig and self.jsonConfig['host_address'] != "" \
            else "http://127.0.0.1:8888"

        self.contracts_dir = ''
        if 'contract-path' in self.jsonConfig and self.jsonConfig['contract-path'] != '':
            self.contracts_dir = self.jsonConfig['contract-path']

        self.keosd = "keosd"
        self.cleos = "cleos"
        self.nodeos = "nodeos"

    def setup(self):

        if not self.isCdt():
            print('Grow requires that eosio.cdt be installed')
            print('Please install the cdt and try again')
            exit(2)

        if not self.isNodeos():
            print('Grow requires nodeos to be installed and in the path variable')
            exit(2)

        if not self.isCleos():
            print('Grow requires cleos to be installed and in the path variable')
            exit(2)

        if not self.isKeosd():
            print('Grow requires keosd to be installed and in the path variable')
            exit(2)

        if not os.path.exists(self.contracts_dir):
            print('eosio.contracts source either doesn\'t exist, or isn\'t initialized')
            exit(2)

        self.wallet = Wallet(self.wallet_dir, self.cleos, self.keosd, 10000)
        self.node_factory = NodeFactory(self.start_cwd, self.parent_dir,
                                        self.nodeos, self.wallet)
        self.account_factory = AccountFactory(self.wallet, self.cleos)
        self.boot_strapper = BootStrapper(self.parent_dir, self.contracts_dir,
                                          self.cleos, self.host_address,
                                          self.account_factory)

    def isCdt(self):
        output = get_output('eosio-cpp --version')
        return re.match('eosio-cpp version (!?[0-9].[0-9](.[0-9])?)', output)

    def isNodeos(self):
        output = get_output('nodeos --version')
        return re.match('v(!?[0-9].[0-9](.[0-9])?)', output)

    def isKeosd(self):
        output = get_output('keosd --version')
        return re.match('v(!?[0-9].[0-9](.[0-9])?)', output)

    def isCleos(self):
        output = get_output('cleos version client')
        return re.match('Build version: ', output)

    def set_source_path(self, contract_path):
        self.jsonConfig['contract-path'] = os.path.abspath(contract_path)
        self.save()

    def get_contracts_path(self):
        return self.contracts_dir

    def set_host_address(self, address):
        self.host_address = address

    def save(self):
        if hasattr(self, 'node_factory'):
            self.node_factory.save()
        self.jsonConfig['host_address'] = self.host_address
        create_file(join(self.parent_dir, 'config/state.json'),
                    json.dumps(self.jsonConfig, sort_keys=True, indent=4))

    @staticmethod
    def get_chain_id():
        j = json.loads(get_output('teclos get info'))
        return j['chain-id']
Example #14
def convert_matmul_model(input_model,
                         output_model,
                         only_for_scan=False,
                         share_input_quantization=False,
                         preset_str='asymm8_param0_input1',
                         qcfg_json=None,
                         export_qcfg_json=None):
    preset_qcfgs = {
        'asymm8_param0_input1': {
            'W': dict(QuantizeConfig(signed=1, reserved_bits=0, type_bits=8)),
            'X': dict(QuantizeConfig(signed=0, reserved_bits=1, type_bits=8)),
            'Symmetric': 0
        },
        'symm16_param3_input3': {
            'W': dict(QuantizeConfig(signed=1, reserved_bits=3, type_bits=16)),
            'X': dict(QuantizeConfig(signed=1, reserved_bits=3, type_bits=16)),
            'Symmetric': 1
        }
    }
    default_qcfg = preset_qcfgs[preset_str]
    in_mp = onnx.load(input_model)

    qcfg_dict = {}
    if qcfg_json and not export_qcfg_json:
        with open(qcfg_json, 'r') as f:
            qcfg_dict = json.load(f)

    out_mp = onnx.ModelProto()
    out_mp.CopyFrom(in_mp)
    out_mp.ir_version = 5  # update ir version to avoid requirement of initializer in graph input
    # bump up to ONNX opset 10, which is required for MatMulInteger
    ensure_opset(out_mp, 10)
    # add MS domain for MatMulInteger16
    ensure_opset(out_mp, 1, 'com.microsoft')
    out_mp.graph.ClearField('node')
    nf = NodeFactory(out_mp.graph)
    # remember MatMul weights that have been converted, in case of sharing
    converted_weights = {}
    # remember quantized inputs that might be shared between MatMuls
    quantized_inputs = {} if share_input_quantization else None
    for in_n in in_mp.graph.node:
        if in_n.op_type == 'Slice' and len(in_n.input) == 1:
            upgrade_slice_op(nf, in_n)
            continue

        if in_n.op_type == 'MatMul' and not only_for_scan:
            if quantize_matmul_2d_with_weight(in_n, in_mp.graph, nf,
                                              converted_weights,
                                              quantized_inputs, qcfg_dict,
                                              export_qcfg_json, default_qcfg):
                continue

        out_n = out_mp.graph.node.add()
        out_n.CopyFrom(in_n)
        if in_n.op_type == 'Scan':
            in_subgraph = NodeFactory.get_attribute(in_n, 'body')
            out_subgraph = NodeFactory.get_attribute(out_n, 'body')
            out_subgraph.ClearField('node')
            scan_nf = NodeFactory(out_mp.graph, out_subgraph)
            # remember quantized inputs that might be shared between MatMuls
            subgraph_quantized_inputs = {} if share_input_quantization else None
            for in_sn in in_subgraph.node:
                if in_sn.op_type == 'MatMul':
                    if quantize_matmul_2d_with_weight(
                            in_sn, in_subgraph, scan_nf, converted_weights,
                            subgraph_quantized_inputs, qcfg_dict,
                            export_qcfg_json, default_qcfg):
                        continue

                if in_sn.op_type == 'Slice' and len(in_sn.input) == 1:
                    upgrade_slice_op(scan_nf, in_sn)
                    continue

                out_sn = out_subgraph.node.add()
                out_sn.CopyFrom(in_sn)

    onnx.save(out_mp, output_model)
    if export_qcfg_json:
        with open(qcfg_json, 'w') as f:
            f.write(json.dumps(qcfg_dict, indent=2))
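A hedged usage sketch (file names are hypothetical; the preset names are the keys of preset_qcfgs above):

convert_matmul_model('model.onnx',          # hypothetical input path
                     'model_quant.onnx',    # hypothetical output path
                     preset_str='symm16_param3_input3')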