Example #1
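(These snippets look like oneflow-onnx export handlers; each assumes numpy is imported as np and that the converter's util and id_util helpers are in scope.)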
 def Version_6(cls, ctx, node, **kwargs):
     # The flow scalar op carries its operand as an attribute, either int
     # or float; pick whichever one the flag marks as valid.
     scalar_val = (node.get_attr_value("int_operand")
                   if node.get_attr_value("has_int_operand") else
                   node.get_attr_value("float_operand"))
     # Materialize the scalar as a Const node, cast to the dtype of the
     # op's first input, and wire it in as a second input.
     np_dtype = util.Onnx2NumpyDtype(ctx.get_dtype(node.input[0]))
     scalar_node = ctx.MakeConst(id_util.UniqueStr("scalar"),
                                 np.array([scalar_val]).astype(np_dtype))
     node.input.append(scalar_node.output[0])
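Example #2 is the same handler written against a different node API: node.attrs instead of get_attr_value, and node.input_tensor_names / node.output_tensor_names instead of node.input / node.output.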
Example #2
 def Version_6(cls, ctx, node, **kwargs):
     # Same logic as Example #1, via the attrs dict and *_tensor_names lists.
     scalar_val = (node.attrs["int_operand"]
                   if node.attrs["has_int_operand"] else
                   node.attrs["float_operand"])
     np_dtype = util.Onnx2NumpyDtype(
         ctx.get_dtype(node.input_tensor_names[0]))
     scalar_node = ctx.MakeConst(id_util.UniqueStr("scalar"),
                                 np.array([scalar_val]).astype(np_dtype))
     node.input_tensor_names.append(scalar_node.output_tensor_names[0])
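Both versions materialize the scalar the same way. A minimal standalone sketch of that pattern, assuming plain numpy and made-up attribute values:

 import numpy as np

 # Made-up attribute values for illustration.
 attrs = {"has_int_operand": False, "int_operand": 0, "float_operand": 2.5}
 # Pick whichever operand the flag marks as valid, as both handlers above do.
 scalar_val = attrs["int_operand"] if attrs["has_int_operand"] else attrs["float_operand"]
 # Wrap it as a rank-1 array in the input tensor's dtype (float32 assumed here).
 scalar_const = np.array([scalar_val]).astype(np.float32)
 print(scalar_const)  # [2.5]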
Example #3
 def get_saved_tensor(self, node):
     tensor_name = node.output_tensor_names[0]
     # TODO(daquexian): node.output_tensor_names[0] is "node_name/output_name",
     # so this pathjoin doesn't work on Windows (where the path separator is "\")
     path = pathjoin(self._model_save_dir, tensor_name)
     # The dump is a flat binary blob; dtype and shape come from graph metadata.
     tensor_value = np.fromfile(
         path, dtype=util.Onnx2NumpyDtype(self.get_dtype(tensor_name))
     ).reshape(self.get_shape(tensor_name))
     return tensor_value
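For context, np.fromfile reads back a flat, headerless dump, so the dtype and shape must be reapplied from metadata. A minimal round-trip sketch (the file path is made up):

 import numpy as np

 value = np.arange(6, dtype=np.float32).reshape(2, 3)
 value.tofile("/tmp/saved_tensor.bin")  # raw bytes, no dtype or shape header
 loaded = np.fromfile("/tmp/saved_tensor.bin", dtype=np.float32).reshape(2, 3)
 assert (loaded == value).all()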
Example #4
 def Version_11(cls, ctx, node, **kwargs):
     node.set_attr("mode", "constant")
     # Opset 11 moves the paddings from an attribute to an input: a 1-D
     # int64 tensor of all begin paddings followed by all end paddings.
     padding_before = node.get_attr_value("padding_before")
     padding_after = node.get_attr_value("padding_after")
     paddings = np.array(padding_before + padding_after).astype(np.int64)
     padding_node = ctx.MakeConst(id_util.UniqueStr("const"), paddings)
     node.input.append(padding_node.output[0])
     # The fill value is also an input; pick the integral or floating
     # attribute to match the data tensor's dtype.
     dtype = ctx.get_dtype(node.input[0])
     const_val = (node.get_attr_value("integral_constant_value")
                  if util.is_integral_onnx_dtype(dtype) else
                  node.get_attr_value("floating_constant_value"))
     const_val = np.array(const_val).astype(util.Onnx2NumpyDtype(dtype))
     const_val_node = ctx.MakeConst(id_util.UniqueStr("const"), const_val)
     node.input.append(const_val_node.output[0])
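In opset 11, ONNX Pad takes pads (and the optional constant_value) as inputs rather than attributes, which is why the handler appends two Const nodes. The pads layout is all begin paddings followed by all end paddings, matching the concatenation above; a small sketch with assumed values:

 import numpy as np

 padding_before = [0, 0, 1, 1]  # assumed per-axis begin padding for an NCHW tensor
 padding_after = [0, 0, 1, 1]   # assumed per-axis end padding
 pads = np.array(padding_before + padding_after).astype(np.int64)
 print(pads)  # [0 0 1 1 0 0 1 1]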
Example #5
    def Version_6(cls, ctx, node, **kwargs):
        node.op_type = "BatchNormalization"
        # flow inputs: x, gamma, beta, moving_mean, moving_variance
        # flow outputs: y, mean, inv_variance
        # flow attributes: data_format, epsilon, is_training
        # onnx inputs: X, scale, B, mean, variance; attributes: epsilon, momentum=0.9, spatial=1
        # onnx outputs: Y, mean, var, saved_mean, saved_var
        # Detach unused outputs. While we could let the unused outputs dangle,
        # some runtimes like pytorch/caffe2 do complain about it.
        if node.attrs["training"]:
            raise NotImplementedError(
                "We only support inference mode ONNX BatchNormalization now"
            )
        consumers = [
            ctx.FindOutputConsumers(output_name)
            for output_name in node.output_tensor_names[1:]
        ]
        if not any(consumers):
            new_output = [node.output_tensor_names[0]]
            node.output_tensor_names = new_output

        # Shared helper with the Conv handlers; no kernel input to convert here.
        _ConvConvertInputs(ctx, node, with_kernel=False)

        scale_shape = ctx.get_shape(node.input_tensor_names[1])
        mean_shape = ctx.get_shape(node.input_tensor_names[3])
        var_shape = ctx.get_shape(node.input_tensor_names[4])
        val_type = util.Onnx2NumpyDtype(ctx.get_dtype(node.input_tensor_names[1]))

        if mean_shape != scale_shape:
            # Re-emit moving_mean as a Const resized to the scale shape.
            new_mean_value = np.array(
                np.resize(
                    node.input_nodes[3].get_tensor_value(as_list=False), scale_shape
                ),
                dtype=val_type,
            )
            new_mean_node_name = id_util.UniqueStr(node.name)
            ctx.MakeConst(new_mean_node_name, new_mean_value)
            node.input_tensor_names[3] = new_mean_node_name

        if var_shape != scale_shape:
            # Likewise re-emit moving_variance resized to the scale shape.
            new_var_value = np.array(
                np.resize(
                    node.input_nodes[4].get_tensor_value(as_list=False), scale_shape
                ),
                dtype=val_type,
            )
            new_var_node_name = id_util.UniqueStr(node.name)
            ctx.MakeConst(new_var_node_name, new_var_value)
            node.input_tensor_names[4] = new_var_node_name
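The shape fix-ups at the end rely on np.resize, which tiles (or truncates) the stored statistic until it matches the scale parameter's shape. A small sketch with assumed values:

 import numpy as np

 scale_shape = [4]                                # assumed channel count
 moving_mean = np.array([0.5], dtype=np.float32)  # assumed stored value
 new_mean = np.array(np.resize(moving_mean, scale_shape), dtype=np.float32)
 print(new_mean)  # [0.5 0.5 0.5 0.5]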