def expm1_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    console.warning(
        "[TensorFlowConverter] In WebDNN, \"Expm1(x)\" is converted into \"Exp(x)-1\", which is not as accurate as Expm1 when "
        "x is so small that \"Exp(x) == 1\" in floating point accuracy.")
    x = converter.get_variable(tf_op.inputs[0])
    y = Exp(None)(x)[0] - 1
    converter.set_variable(tf_op.outputs[0], y)
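A minimal NumPy illustration (not part of WebDNN) of why the warning above exists: for very small x, exp(x) rounds to exactly 1.0 in float32, so exp(x) - 1 collapses to 0, while a true expm1 returns approximately x.

import numpy as np

x = np.float32(1e-10)
print(np.exp(x) - np.float32(1.0))  # 0.0 -- exp(x) rounds to 1.0f, all digits cancel
print(np.expm1(x))                  # ~1e-10 -- computed without catastrophic cancellation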
def _convert_logsumexp(converter: ChainerConverter, c_op: "chainer.functions.LogSumExp"):
    x = converter.get_variable(c_op.inputs[0])
    if c_op.axis is None:
        axes = list(x.order.axes)
    else:
        axes = [x.order.axes[i] for i in c_op.axis]

    # TODO: Conversion result is wrong when x.shape_dict[axis] is a placeholder.
    if any(not Placeholder.check_resolved(x.shape_dict[axis]) for axis in axes):
        raise NotImplementedError(
            "[ChainerConverter] \"LogSumExp\" for dynamic number of categories is not supported")

    # Subtract the per-axis maximum before exponentiation for numerical stability.
    max_x = x
    for axis in axes:
        max_x, = Max(None, axis=axis)(max_x)
    exp_delta_x, = Exp(None)(x - max_x)
    sum_exp_delta_x = exp_delta_x
    for axis in axes:
        sum_exp_delta_x, = Sum(None, axis=axis)(sum_exp_delta_x)
    y = Log(None)(sum_exp_delta_x)[0] + max_x
    converter.set_variable(c_op.outputs[0](), y)
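The max-subtraction in this converter (and in the two log-sum-exp conversions further below) is the standard numerically stable evaluation of log-sum-exp: with $m = \max_i x_i$,

$$\log \sum_i e^{x_i} = m + \log \sum_i e^{x_i - m},$$

so the argument passed to Exp is always at most 0 and cannot overflow.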
def _convert_expm1(converter: ChainerConverter, c_op: "chainer.functions.Expm1"):
    console.warning(
        "[ChainerConverter] In WebDNN, \"Expm1(x)\" is converted into \"Exp(x)-1\", which is not as accurate as Expm1 when "
        "x is so small that \"Exp(x) == 1\" in floating point accuracy.")
    x = converter.get_variable(c_op.inputs[0])
    y = Exp(None)(x)[0] - 1
    converter.set_variable(c_op.outputs[0](), y)
def _convert_selu(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    alpha = attrs["alpha"].f if "alpha" in attrs else 1.6732
    gamma = attrs["gamma"].f if "gamma" in attrs else 1.0507
    y, = Select(None)(x > 0, gamma * x, gamma * (alpha * Exp(None)(x)[0] - alpha))
    converter.set_variable(onnx_op.output[0], y)
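The Select call evaluates the piecewise SELU definition, with the fallback constants being truncations of the ONNX defaults ($\alpha \approx 1.6733$, $\gamma \approx 1.0507$):

$$\mathrm{selu}(x) = \begin{cases} \gamma x & x > 0 \\ \gamma \alpha \left(e^{x} - 1\right) & x \le 0 \end{cases}$$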
def template(x_order=OrderNHWC, y_order=OrderNHWC, description: str = ""):
    vx = np.random.rand(2, 3, 4, 5) - 0.5
    vy = np.exp(vx)

    x = Variable(vx.shape, order=OrderNHWC)
    y, = Exp(None)(x)

    x.change_order(x_order)
    y.change_order(y_order)

    generate_kernel_test_case(
        description=f"Exp {description}",
        graph=Graph([x], [y]),
        inputs={x: np.transpose(vx, [OrderNHWC.axes_dict[a] for a in x.order.axes])},
        expected={y: np.transpose(vy, [OrderNHWC.axes_dict[a] for a in y.order.axes])},
    )
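A hypothetical pair of test entry points built on this template; the test names and the OrderNCHW import (from webdnn.graph.order) are assumptions, not part of the snippet above.

def test():
    template()

def test_different_order():
    template(x_order=OrderNCHW, y_order=OrderNCHW, description="x_order=NCHW, y_order=NCHW")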
def _convert_log_softmax(converter: ChainerConverter, c_op: "chainer.functions.LogSoftmax"):
    x = converter.get_variable(c_op.inputs[0])
    axis = x.order.axes[1]

    max_x, = Max(None, axis=axis)(x)
    exp_delta_x, = Exp(None)(x - max_x)
    sum_exp_delta_x, = Sum(None, axis=axis)(exp_delta_x)
    log_sum_delta_exp, = Log(None)(sum_exp_delta_x)
    y = x - (log_sum_delta_exp + max_x)
    converter.set_variable(c_op.outputs[0](), y)
def _convert_log_softmax(converter: ChainerConverter, c_op: "chainer.functions.LogSoftmax"):
    x = converter.get_variable(c_op.inputs[0])
    axis = x.order.axes[1]

    # TODO: Conversion result is wrong when x.shape[1] is a placeholder.
    if not Placeholder.check_resolved(x.shape[1]):
        raise NotImplementedError(
            "[ChainerConverter] \"LogSoftMax\" for dynamic number of categories is not supported")

    max_x, = Max(None, axis=axis)(x)
    exp_delta_x, = Exp(None)(x - max_x)
    sum_exp_delta_x, = Sum(None, axis=axis)(exp_delta_x)
    log_sum_delta_exp, = Log(None)(sum_exp_delta_x)
    y = x - (log_sum_delta_exp + max_x)
    converter.set_variable(c_op.outputs[0](), y)
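Both log-softmax converters compute the numerically stable form of log-softmax: with $m = \max_j x_j$ taken along the class axis,

$$\log \operatorname{softmax}(x)_i = x_i - \Big(m + \log \sum_j e^{x_j - m}\Big).$$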
def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
    flag_changed = False
    for softmax in traverse.filter_nodes(traverse.listup_operators(graph), Softmax):
        x = softmax.inputs["x"]
        y = softmax.outputs["y"]
        axis = softmax.parameters["axis"]
        softmax.remove_all()
        flag_changed = True

        # Decompose Softmax into elementary operators, subtracting the maximum
        # before Exp for numerical stability.
        max_x, = Max(None, axis=axis)(x)
        delta_x = x - max_x
        exp_delta_x, = Exp(None)(delta_x)
        sum_exp_delta_x, = Sum(None, axis=axis)(exp_delta_x)
        new_y = exp_delta_x / sum_exp_delta_x

        new_y.change_order(y.order)
        OptimizeRule.replace_variable(graph, new_y, y)

    return graph, flag_changed
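This decomposition relies on softmax being invariant under a constant shift of its input, which is why subtracting the maximum changes nothing mathematically while keeping the argument of Exp non-positive:

$$\operatorname{softmax}(x)_i = \frac{e^{x_i}}{\sum_j e^{x_j}} = \frac{e^{x_i - m}}{\sum_j e^{x_j - m}}, \qquad m = \max_j x_j.$$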
def _convert_logsumexp(converter: ChainerConverter, c_op: "chainer.functions.LogSumExp"):
    x = converter.get_variable(c_op.inputs[0])
    if c_op.axis is None:
        axes = list(x.order.axes)
    else:
        axes = [x.order.axes[i] for i in c_op.axis]

    max_x = x
    for axis in axes:
        max_x, = Max(None, axis=axis)(max_x)
    exp_delta_x, = Exp(None)(x - max_x)
    sum_exp_delta_x = exp_delta_x
    for axis in axes:
        sum_exp_delta_x, = Sum(None, axis=axis)(sum_exp_delta_x)
    y = Log(None)(sum_exp_delta_x)[0] + max_x
    converter.set_variable(c_op.outputs[0](), y)
def _convert_reduce_logsumexp(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    axes = [x.order.axes[i] for i in attrs["axes"].ints]
    keepdims = (attrs["keepdims"].i if "keepdims" in attrs else 1) == 1

    max_x = x
    for axis in axes:
        max_x, = Max(None, axis=axis)(max_x)
    exp_delta_x, = Exp(None)(x - max_x)
    sum_exp_delta_x = exp_delta_x
    for axis in axes:
        sum_exp_delta_x, = Sum(None, axis=axis)(sum_exp_delta_x)
    y = Log(None)(sum_exp_delta_x)[0] + max_x

    if not keepdims:
        y = y.squeeze(axis=axes)

    converter.set_variable(onnx_op.output[0], y)
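A small NumPy reference (an assumption for illustration, not part of the converter) that mirrors what the graph built above computes, including the keepdims handling:

import numpy as np

def reduce_logsumexp_reference(x, axes, keepdims=True):
    # Same max-shift as the converter: Exp never sees a positive argument.
    m = np.max(x, axis=axes, keepdims=True)
    y = np.log(np.sum(np.exp(x - m), axis=axes, keepdims=True)) + m
    return y if keepdims else np.squeeze(y, axis=axes)

x = np.random.rand(2, 3, 4).astype(np.float32)
print(reduce_logsumexp_reference(x, axes=(1, 2), keepdims=False).shape)  # (2,)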
def _convert_exp(converter: ONNXConverter, onnx_op: INodeProto):
    x0 = converter.get_variable(onnx_op.input[0])
    y, = Exp(None)(x0)
    converter.set_variable(onnx_op.output[0], y)
def expm1_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y, = Exp(None)(x)
    y = y - 1
    converter.set_variable(tf_op.outputs[0], y)
def _convert_exp(converter: ChainerConverter, c_op: "chainer.functions.Exp"):
    x = converter.get_variable(c_op.inputs[0])
    y, = Exp(None)(x)
    converter.set_variable(c_op.outputs[0](), y)