Example #1
0
def _convert_max(converter: ChainerConverter, c_op: "chainer.functions.Max"):
    """Register the WebDNN equivalent of ``chainer.functions.Max``.

    Reduces the input one axis at a time. When ``keepdims`` is False each
    reduced axis is squeezed away, except that at least one dimension is
    always kept (the ``x.ndim > 1`` guard prevents a 0-d result).
    """
    x = converter.get_variable(c_op.inputs[0])

    # Resolve the axes to reduce up front: every axis when c_op.axis is
    # None, otherwise the axes selected by the given indices.
    if c_op.axis is None:
        reduce_axes = list(x.order.axes)
    else:
        reduce_axes = [x.order.axes[i] for i in c_op.axis]

    for reduce_axis in reduce_axes:
        x, = Max(None, axis=reduce_axis)(x)

        # Drop the reduced dimension unless keepdims was requested or
        # squeezing would leave a 0-d variable.
        if not c_op.keepdims and x.ndim > 1:
            x = x.squeeze(reduce_axis)

    converter.set_variable(c_op.outputs[0](), x)
Example #2
0
def _convert_reduce_max(converter: ONNXConverter, onnx_op: INodeProto):
    """Register the WebDNN equivalent of the ONNX ``ReduceMax`` operator."""
    x = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)
    axes = attrs["axes"].ints
    # ONNX treats a missing "keepdims" attribute as 1 (true).
    keepdims = ("keepdims" not in attrs) or attrs["keepdims"].i == 1

    # Reduce one axis at a time; the reduced axes remain present (size 1)
    # until the explicit squeeze below, so the indices stay valid.
    for axis_index in axes:
        x, = Max(None, axis=x.order.axes[axis_index])(x)

    if not keepdims:
        # Remove every reduced (now size-1) axis in a single squeeze.
        x = x.squeeze(axis=[x.order.axes[i] for i in axes])

    converter.set_variable(onnx_op.output[0], x)
Example #3
0
def max_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Register the WebDNN equivalent of the TensorFlow 'Max' reduction op.

    ``inputs[0]`` is the tensor to reduce; ``inputs[1]`` must be a constant
    tensor of axis indices (dynamic axes are rejected). Reduces one axis at
    a time, squeezing each reduced axis away unless 'keep_dims' is set; at
    least one dimension is always kept (the ``x.ndim > 1`` guard).
    """
    x = converter.get_variable(tf_op.inputs[0])
    # Renamed from 'axis': the original rebound this name as the loop
    # variable below, shadowing the ConstantVariable checked by the assert.
    axis_var = converter.get_variable(tf_op.inputs[1])
    assert isinstance(
        axis_var, ConstantVariable
    ), "[TensorFlowConverter] Operation 'Max' with dynamic axis  is not supported yet."

    # Loop-invariant attribute lookup, hoisted out of the loop.
    keep_dims = tf_op.get_attr("keep_dims")

    for reduce_axis in [
            x.order.axes[i] for i in axis_var.data.astype(int).flatten().tolist()
    ]:
        x, = Max(None, axis=reduce_axis)(x)

        if not keep_dims and x.ndim > 1:
            x = x.squeeze(reduce_axis)

    converter.set_variable(tf_op.outputs[0], x)