Code example #1
Score: 0
File: optimizer.py — Project: tkng/blueoil
    def _precompute_or_prune_inputs(self, node: Operator) -> None:
        """Precompute itself or prune the input nodes.

        If all inputs have precomputed values, run this node's forward pass
        and mark it as precomputable. Otherwise, every prunable input node
        is pruned and substituted with a new constant node holding its
        (optionally scaled) data.

        Args:
            node: the operator whose inputs are inspected.

        Raises:
            ValueError: if more than one quantized input feeds this node.
        """
        # Gather the inputs that are actually connected (skip absent slots).
        ops: List[Operator] = [
            node.input_ops[i] for i in node.input_names
            if node.input_ops.get(i)
        ]
        ops_have_precomp_values = [self._has_precompute_value(op) for op in ops]
        ops_are_in_quantized = [op.name in self._quantizers for op in ops]
        # NOTE: the original also built an unused `ops_are_prunable` list;
        # prunability is re-checked per-op in the loop below, so it was dropped.

        # all() / any() replace reduce(); unlike reduce() without an initial
        # value, they are also well-defined when `ops` is empty
        if all(ops_have_precomp_values):  # all inputs have concrete values
            node.run_forward()
            self._precomp_dic[node.name] = True  # this node can be pruned
            if any(ops_are_in_quantized):  # some input operator to be quantized exists
                quantizers = {
                    op.name: self._quantizers[op.name]
                    for op in ops if self._quantizers.get(op.name)
                }
                if len(quantizers) > 1:
                    # BUG FIX: the original constructed this ValueError but
                    # never raised it, silently picking an arbitrary quantizer.
                    raise ValueError(
                        f'{node.name}: multiple quantized inputs with {node.op_type} are not supported.'
                    )
                self._quantizers[node.name] = list(quantizers.values())[0]

        else:
            self._precomp_dic[node.name] = False

            # prune input operators
            for key, op in zip(node.input_names, ops):
                if self._is_prunable(op):
                    # get scaling factor if it is to be quantized but not in
                    # hard quantization mode
                    quantizer = self._quantizers.get(op.name)
                    if quantizer is None:
                        # not quantized: identity scaling, no reshape needed
                        # (BUG FIX: the original reshaped unconditionally and
                        # would crash on the int 1, which has no `.shape`)
                        scaling = 1
                    else:
                        scaling = quantizer.scaling_factor
                        # pad trailing singleton dims so the scaling factor
                        # broadcasts against op.data
                        extra_dims = tuple(
                            np.ones((len(op.data.shape) - len(scaling.shape)),
                                    dtype=np.int32))
                        scaling = scaling.reshape(scaling.shape + extra_dims)

                    # creates new constant carrying the pre-scaled data
                    new_op = Constant(op.name + '_new',
                                      op.dtype,
                                      op.data * scaling,
                                      dimension_format=op.dimension)

                    # replace and prune the old operators
                    node.add_input(key, new_op)
                    self._graph.add_op(new_op)
                    self._prune(op)
Code example #2
Score: 0
File: graph.py — Project: wtnb93/blueoil
        def match(op1: Operator, op2: Operator) -> bool:
            """Recursively compare two operators and their input subgraphs."""
            if op1.equals(op2):
                # descend pairwise into the inputs; all() short-circuits on
                # the first mismatch, just like the original loop
                pairs = zip(op1.input_ops.values(), op2.input_ops.values())
                return all(match(a, b) for a, b in pairs)
            print(f'{op1.name} is different.')
            return False
Code example #3
Score: 0
File: feluda.py — Project: tattle-made/tattle-api
 def __init__(self, configPath):
     """Wire up the application's components from a configuration file.

     Each component (operators, store, queue, server) is attached only
     when its section is present and truthy in the loaded config; absent
     sections leave the corresponding attribute undefined.
     # NOTE(review): consider defaulting these attributes to None so
     # attribute access never raises — confirm no caller relies on
     # hasattr() checks before changing.

     Args:
         configPath: path to the configuration file consumed by config.load.
     """
     self.config = config.load(configPath)
     if self.config.operators:
         self.operators = Operator(self.config.operators)
     if self.config.store:
         self.store = store.get_store(self.config.store)
     if self.config.queue:
         # (removed a commented-out debug print that was left here)
         self.queue = Queue.make(self.config.queue)
     if self.config.server:
         self.server = Server(self.config.server)
Code example #4
Score: 0
File: optimizer.py — Project: tkng/blueoil
    def _set_dtype(self, node: Operator, qconv: List[Conv]) -> None:
        """Set the packed dtype on `node` if it only feeds quantized convolvers.

        Args:
            node: the operator whose dtype may be overwritten.
            qconv: the quantized convolvers of the graph.
        """
        def before_qconv() -> bool:
            """Return if the node is before a quantized convolver"""
            convs: List[Conv] = self._output_convs.get(node) or []
            # consistency check: a node must not feed both a quantized and a
            # non-quantized convolver at once.  all()/any() replace the
            # reduce() calls with identical semantics (True/False initials)
            is_qconv: List[bool] = [c in qconv for c in convs]
            all_is_qconv = all(is_qconv)
            some_is_qconv = any(is_qconv)
            assert convs == [] or (all_is_qconv == some_is_qconv), \
                f'{node.name} connects to both of a quantized convolver and non-quantized one.'

            return convs != [] and all_is_qconv

        def get_dtype() -> Optional[DataType]:
            """Return dtype along with which path the node is on: 'input' or 'weight' of a conv"""
            path = self._packed_input_path.get(node.name)
            return self._packed_dtype[path] if path is not None else None

        dtype = get_dtype()
        # NOTE: removed the original's unused local `conv` (dead lookup into
        # self._output_convs; before_qconv() performs its own lookup)
        if dtype is not None and before_qconv():
            node.dtype = dtype
Code example #5
Score: 0
File: optimizer.py — Project: tkng/blueoil
 def _check_and_transpose(self, node: Operator) -> None:
     """Transpose `node` into the permutation derived from its dimension format."""
     node.transpose(self._get_permutation(node.dimension))