Example #1
 def build(x):
     return mb.softmax(x=x, axis=0)
Example #2
 def build(x):
     return mb.leaky_relu(x=x, alpha=2.0)
Example #3
 def test_builder_eval(self):
     x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
     v = mb.linear_activation(x=x_val, alpha=2.0, beta=3.0)
     np.testing.assert_allclose(x_val * 2.0 + 3.0, v.val, atol=1e-04, rtol=1e-05)
Example #4
    def _try_to_transform(self, conv_op, add_op, block):
        if conv_op in self.ops_to_skip or add_op in self.ops_to_skip:
            return False

        if add_op.op_type == "sub":
            bias_var = add_op.y
        else:
            bias_var = add_op.x if add_op.x.val is not None else add_op.y
        bias_value = bias_var.val

        is_conv_op = (conv_op.op_type == "conv")

        # check that the bias value is a constant array or a scalar constant
        if not isinstance(bias_value, (np.ndarray, np.generic)):
            return False

        is_bias_scalar = False
        if not isinstance(bias_value, np.ndarray):
            is_bias_scalar = True

        # find rank of the conv input
        rank = conv_op.x.rank
        if rank is None:
            return False
        if rank not in (3, 4, 5):
            return False

        # check compatibility of bias value with the rank of the conv op
        # either bias value should be a scalar or:
        # rank=3 ==> (B,C,D), which means bias must be (1,C,1) or (C,1)
        # rank=4 ==> (B,C,D1,D2), which means bias must be (1,C,1,1) or (C,1,1)
        # rank=5 ==> (B,C,D1,D2,D3), which means bias must be (1,C,1,1,1) or (C,1,1,1)

        if is_bias_scalar:
            bias_value = np.array([bias_value])
        else:
            # check that there is at most one dimension in the shape that is not 1
            if len(np.squeeze(bias_value).shape) > 1:
                return False
            # check that addition is not happening on the batch dimension
            if len(bias_value.shape) == rank:
                if bias_value.shape[0] != 1:
                    return False
            # check that last rank-2 entries in the shape vector are all 1s
            if np.prod(bias_value.shape[-(rank - 2):]) != 1:
                return False
            bias_value = np.squeeze(bias_value)

        if add_op.op_type == "sub":
            bias_value *= -1

        # everything looks good, now find the new updated bias
        old_bias = conv_op.inputs.get("bias", None)
        old_bias_value = None
        if old_bias is not None and old_bias.val is not None:
            old_bias_value = old_bias.val
        if old_bias is None:
            # need to create a fresh numpy array for bias
            if np.prod(bias_value.shape) == 1:
                # it's a scalar bias
                # need to find the value of Cout to form a new bias
                if conv_op.weight.val is None:
                    return False
                # conv_transpose has weight format [K, C_out, spatial dims]
                # conv has weight format [C_out, K, spatial dims]
                Cout = conv_op.weight.val.shape[0 if is_conv_op else 1]
                new_bias_value = np.broadcast_to(bias_value, (Cout, ))
            else:
                new_bias_value = bias_value
        else:
            # just need to update the existing bias array
            try:
                new_bias_value = old_bias_value + bias_value
            except:
                return False

        # create a new conv op with the new bias value, copying rest of the attributes
        out_name = add_op.outputs[0].name
        if new_bias_value.dtype != np.float32 and new_bias_value.dtype != np.float16:
            # cast the bias to match the weight type
            weight_np_type = types.nptype_from_builtin(
                conv_op.inputs["weight"].sym_type.get_primitive())
            logging.warning(
                "conv_bias_fusion pass: casting bias "
                "from {} to {} to match the dtype of the weight of the conv layer"
                .format(new_bias_value.dtype, weight_np_type))
            new_bias_value = new_bias_value.astype(weight_np_type)
        new_bias_var = mb.const(val=new_bias_value, before_op=conv_op)

        conv_kargs = {
            "bias": new_bias_var,
            "name": out_name,
            "before_op": conv_op
        }

        for k, v in conv_op.inputs.items():
            if k == "bias":
                continue
            conv_kargs[k] = v

        if is_conv_op:
            x = mb.conv(**conv_kargs)
        else:
            x = mb.conv_transpose(**conv_kargs)

        add_op.enclosing_block.replace_uses_of_var_after_op(
            anchor_op=add_op, old_var=add_op.outputs[0], new_var=x)
        # Remove all the ops at once
        block.remove_ops([conv_op, add_op])
        return True
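The shape constraints spelled out in the comments above (a scalar bias, or a bias with a single non-unit dimension that is not the batch axis) can be exercised on their own. A minimal NumPy-only sketch; the helper name bias_shape_is_compatible is ours, not part of the pass:

 import numpy as np

 def bias_shape_is_compatible(bias_value, rank):
     # Sketch of the rule from the comments above: a scalar constant is always
     # acceptable; otherwise the bias may have at most one non-unit dimension,
     # it must not broadcast over the batch axis, and the trailing rank-2
     # (spatial) entries of its shape must all be 1.
     if not isinstance(bias_value, np.ndarray):
         return True
     if len(np.squeeze(bias_value).shape) > 1:
         return False
     if len(bias_value.shape) == rank and bias_value.shape[0] != 1:
         return False
     if np.prod(bias_value.shape[-(rank - 2):]) != 1:
         return False
     return True

 # e.g. for a rank-4 conv input (B, C, D1, D2):
 assert bias_shape_is_compatible(np.zeros((1, 8, 1, 1), dtype=np.float32), 4)
 assert not bias_shape_is_compatible(np.zeros((1, 8, 3, 1), dtype=np.float32), 4)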
Example #5
 def build(x):
     return mb.gelu(x=x)
Example #6
 def prog(x):
     x = mb.reshape(x=x, shape=(1, 8), name="reshape")
     return x
Example #7
 def prog(x):
     # op name "x" results in output var name "x", which shadows prog
     # input var name "x"
     x = mb.transpose(x=x, perm=[1, 0], name="x")
     x = mb.relu(x=x, name="relu")
     return x
Example #8
 def CustomTopK(context, node):
     x = context[node.inputs[0]]
     k = context[node.inputs[1]]
     sorted = node.attr.get("sorted", False)
     x = mb.custom_topk(x=x, k=k.val, axis=-1, sorted=sorted, name=node.name)
     context.add(node.name, x)
Example #9
 def body(i, ls):
     return mb.add(x=i, y=1), mb.list_write(ls=ls,
                                            index=i,
                                            value=update)
Example #10
 def build(x):
     return mb.thresholded_relu(x=x, alpha=2.0)
Example #11
 def prog(x):
     x1 = mb.transpose(x=x, perm=[0, 3, 1, 2])
     x2 = mb.relu(x=x)
     x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2])
     x4 = mb.add(x=x1, y=x3)
     return mb.relu(x=x4)
Example #12
 def test_builder_eval(self):
     x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
     v = mb.softsign(x=x_val)
     assert is_close(x_val / (1 + np.abs(x_val)), v.val)
Example #13
 def build(x):
     return mb.softsign(x=x)
Example #14
 def test_builder_eval(self):
     x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
     v = mb.softmax(x=x_val, axis=0)
     assert is_close(scipy.special.softmax(x_val, axis=0), v.val)
Example #15
 def true_fn():
     # returns a var named 'x', which shadows the input 'x'
     return mb.add(x=x, y=1, name='x')
Example #16
 def cond(i, ls):
     return mb.less(x=i, y=num_iters)
Example #17
 def false_fn():
     # two ops with name "x"
     return mb.add(x=x, y=-1, name='x')
Example #18
 def body(a, b):
     # b is a loop invariant
     return mb.add(x=a, y=b), b
Example #19
 def prog(x):
     x = mb.cast(x=x, dtype="fp16", name="castop")
     x = mb.cast(x=x, dtype="fp32", name="castop")
     x = mb.square(x=x, name="square_last")
     return x
Example #20
 def cond(a, b):
     a_mean = mb.reduce_mean(x=a, axes=[0, 1])
     b_mean = mb.reduce_mean(x=b, axes=[0, 1])
     return mb.less(x=a_mean, y=b_mean)
Example #21
    def convert_main_graph(self, prog, graph):
        func_inputs = {}
        for input_type in self.inputs:
            func_inputs[input_type.name] = mb.placeholder(
                input_type.shape.symbolic_shape, dtype=input_type.dtype)
        prog.set_main_input_types(self.inputs)

        with Function(func_inputs) as ssa_func:
            # Get the input Var
            for name in func_inputs.keys():
                self.context.add(name, ssa_func.inputs[name])
            outputs = convert_graph(self.context, graph, self.outputs)
            ssa_func.set_outputs(outputs)
            prog.add_function("main", ssa_func)
        # check duplicate output
        # Note: sometimes two outputs are pointing to the same Var, we should
        # create mb.identity for those cases
        block = prog["main"]
        with block:
            name_counts = {}
            new_outputs = [output for output in block.outputs]
            for i, v_o in enumerate(block.outputs):
                if v_o.name not in name_counts:
                    name_counts[v_o.name] = 1
                else:
                    name_counts[v_o.name] += 1
                    new_name = v_o.name + "_duplicate_" + str(
                        name_counts[v_o.name])
                    x = mb.identity(x=v_o, name=new_name)
                    new_outputs[i] = x
            block.set_outputs(new_outputs)

        # Rename outputs to TF's name. This is needed when the last op doesn't
        # generate a new Var (e.g., get_tuple, Identity etc.), and thus the
        # last Var would have a different name than the last TF op's name.
        #
        # Example:
        #
        # TF code:
        #    x = tf.placeholder(tf.float32, shape=(1,))
        #    y = tf.placeholder(tf.float32, shape=(1,))
        #    c = lambda i, j: \
        #            tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j))
        #    b = lambda i, j: (tf.add(i, 1), j)
        #    res = tf.while_loop(c, b, [x, y])
        #
        # Resulting nodes (excluding the nodes in while loop cond & body):
        #
        # node name: Placeholder op type: Placeholder inputs: []
        # node name: Placeholder_1 op type: Placeholder inputs: []
        # node name: make_input_0 op type: make_tuple inputs: ['Placeholder',
        #         'Placeholder_1']
        # node name: while_0 op type: while inputs: ['make_input_0']
        # node name: while/Exit op type: get_tuple inputs: ['while_0']
        # node name: while/Exit_1 op type: get_tuple inputs: ['while_0']
        #
        # Observe that return node `while/Exit` is an output from get_tuple,
        # which in our translation simply unpack a python tuple of Vars
        # ('while_0:0', 'while_0:1') returned from while_0 SSA op. We need to
        # rename `while_0:0` to `while/Exit` in order for users to find the
        # output.
        # Note: only rename the output if the output is not Placeholder.

        input_names = [x.name for x in self.inputs]
        for v_o, out_name in zip(prog["main"].outputs, self.outputs):
            if v_o.name != out_name and v_o.name not in input_names:
                logging.info("Renaming output var: '{}' -> '{}'".format(
                    v_o.name, out_name))
                v_o.name = out_name
        self.check_placeholder_output(prog, self.outputs)
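The duplicate-output handling above reduces to a simple name-counting scheme. A plain-Python sketch of just the naming part (the real pass inserts mb.identity ops rather than renaming strings, and the output names here are made up for illustration):

 outputs = ["while/Exit", "while/Exit", "probs"]  # hypothetical output names
 name_counts = {}
 new_outputs = list(outputs)
 for i, name in enumerate(outputs):
     if name not in name_counts:
         name_counts[name] = 1
     else:
         name_counts[name] += 1
         new_outputs[i] = name + "_duplicate_" + str(name_counts[name])
 assert new_outputs == ["while/Exit", "while/Exit_duplicate_2", "probs"]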
Example #22
 def prog(a, b):
     return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b))
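The cond, body, and prog snippets in Examples #18, #20, and #22 fit together into a single program. A sketch assuming the @mb.program decorator and a fixed (2, 3) input shape, neither of which appears on this page:

 import numpy as np
 from coremltools.converters.mil import Builder as mb

 @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3)),
                          mb.TensorSpec(shape=(2, 3))])
 def prog(a, b):
     def cond(a, b):
         # keep looping while mean(a) < mean(b)
         a_mean = mb.reduce_mean(x=a, axes=[0, 1])
         b_mean = mb.reduce_mean(x=b, axes=[0, 1])
         return mb.less(x=a_mean, y=b_mean)

     def body(a, b):
         # b is a loop invariant
         return mb.add(x=a, y=np.float32(1)), b

     return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b))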
Example #23
    def _try_to_transform_transpose_pattern(self, conv_op, block):
        if conv_op in self.ops_to_skip:
            return False

        ops_to_remove = []

        # conv layer
        if conv_op.op_type != "conv" and conv_op.op_type != "conv_transpose":
            return False
        is_deconv = conv_op.op_type == "conv_transpose"
        ops_to_remove.append(conv_op)

        # transpose layer
        if not _check_child_op_type(conv_op, "transpose"):
            return False
        transpose_op = list(conv_op.outputs[0].child_ops)[0]
        ops_to_remove.append(transpose_op)

        # add/sub layer
        if not _check_child_op_type(transpose_op,
                                    "add") and not _check_child_op_type(
                                        transpose_op, "sub"):
            return False
        add_or_sub_op = list(transpose_op.outputs[0].child_ops)[0]

        if add_or_sub_op in self.ops_to_skip:
            return False

        ops_to_remove.append(add_or_sub_op)

        # get the bias
        if add_or_sub_op.x.val is None and add_or_sub_op.y.val is None:
            return False
        bias = add_or_sub_op.x.val if add_or_sub_op.x.val is not None else add_or_sub_op.y.val
        is_first_input = add_or_sub_op.y.val is not None
        is_sub = add_or_sub_op.op_type == "sub"

        # get the conv bias/weight
        conv_shape = conv_op.outputs[0].shape
        Cout = conv_shape[1]
        conv_weight = conv_op.weight.val
        conv_weight_type = conv_weight.dtype
        conv_bias = np.zeros(Cout).astype(
            conv_weight_type) if conv_op.bias is None else conv_op.bias.val

        # check if the bias is compatible for fusion
        is_bias_scalar = True
        if isinstance(bias, np.ndarray):
            if bias.shape == ():
                bias = bias.tolist()
            elif np.prod(bias.shape) == 1:
                bias = np.squeeze(bias).tolist()
            else:
                is_bias_scalar = False

        if not is_bias_scalar:
            if np.prod(bias.shape) != Cout:
                return False
            rank = transpose_op.outputs[0].rank
            cout_dim = transpose_op.perm.val.tolist().index(1) - rank
            if bias.shape[cout_dim] != Cout:
                return False
            bias = np.reshape(bias, (Cout,))

        # compute the new bias
        if is_sub:
            if is_first_input:
                bias = -bias
            else:
                conv_bias = -conv_bias

        new_bias = conv_bias + bias

        # compute the new weight
        if is_sub and not is_first_input:
            new_weight = -conv_weight
        else:
            new_weight = conv_weight

        # check that none of the op in this pattern is connected to the output
        # (except the last op)
        for op in ops_to_remove[:-1]:
            for out in op.outputs:
                if out in block.outputs:
                    return False

        # create a new conv op with the new weight, bias value, copying rest of the attributes
        conv_kargs = {
            "weight": new_weight,
            "bias": new_bias,
            "before_op": conv_op
        }

        for k, v in conv_op.inputs.items():
            if k in ["weight", "bias"]:
                continue
            conv_kargs[k] = v

        if is_deconv:
            x = mb.conv_transpose(**conv_kargs)
        else:
            x = mb.conv(**conv_kargs)

        # create a new transpose op
        out_name = add_or_sub_op.outputs[0].name
        transpose_kargs = {"x": x, "name": out_name, "before_op": transpose_op}
        for k, v in transpose_op.inputs.items():
            if k == "x":
                continue
            transpose_kargs[k] = v
        x = mb.transpose(**transpose_kargs)

        add_or_sub_op.enclosing_block.replace_uses_of_var_after_op(
            anchor_op=add_or_sub_op,
            old_var=add_or_sub_op.outputs[0],
            new_var=x)

        # Remove all the ops at once
        block.remove_ops(ops_to_remove)
        return True
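The channel-axis lookup used above, transpose_op.perm.val.tolist().index(1) - rank, can be checked in isolation. A small sketch assuming an NCHW-to-NHWC transpose (the perm value is our example, not taken from the pass):

 # Find where axis 1 (the conv output's channel axis) lands after the
 # transpose, expressed as a negative index into the transposed shape.
 perm = [0, 2, 3, 1]              # NCHW -> NHWC
 rank = len(perm)                 # 4
 cout_dim = perm.index(1) - rank  # 3 - 4 = -1, i.e. the last axis
 assert cout_dim == -1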
Example #24
 def prog(a, b):
     return mb.mul(x=a, y=2), b
Example #25
 def build(x):
     return [mb.gelu(x=x, mode=mode)]
Example #26
 def prog(unused_input):
     return mb.const(val=[3, 2])
Example #27
 def build(x):
     return mb.linear_activation(x=x, alpha=2.0, beta=3.0)
Example #28
def try_to_transform(conv_op, add_op, block):
    if add_op.op_type == "sub":
        bias_var = add_op.y
    else:
        bias_var = add_op.x if add_op.x.val is not None else add_op.y
    bias_value = bias_var.val

    # check that the bias value is a constant array or a scalar constant
    if not isinstance(bias_value, (np.ndarray, np.generic)):
        return False

    is_bias_scalar = False
    if not isinstance(bias_value, np.ndarray):
        is_bias_scalar = True

    # find rank of the conv input
    rank = conv_op.x.rank
    if rank is None:
        return False
    if rank not in (3, 4, 5):
        return False

    # check compatibility of bias value with the rank of the conv op
    # either bias value should be a scalar or:
    # rank=3 ==> (B,C,D), which means bias must be (1,C,1) or (C,1)
    # rank=4 ==> (B,C,D1,D2), which means bias must be (1,C,1,1) or (C,1,1)
    # rank=5 ==> (B,C,D1,D2,D3), which means bias must be (1,C,1,1,1) or (C,1,1,1)

    if is_bias_scalar:
        bias_value = np.array([bias_value])
    else:
        # check that there is at most one dimension in the shape that is not 1
        if len(np.squeeze(bias_value).shape) > 1:
            return False
        # check that addition is not happening on the batch dimension
        if len(bias_value.shape) == rank:
            if bias_value.shape[0] != 1:
                return False
        # check that last rank-2 entries in the shape vector are all 1s
        if np.prod(bias_value.shape[-(rank - 2):]) != 1:
            return False
        bias_value = np.squeeze(bias_value)

    if add_op.op_type == "sub":
        bias_value *= -1

    # everything looks good, now find the new updated bias
    old_bias = conv_op.inputs.get("bias", None)
    old_bias_value = None
    if old_bias is not None and old_bias.val is not None:
        old_bias_value = old_bias.val
    if old_bias is None:
        # need to create a fresh numpy array for bias
        if np.prod(bias_value.shape) == 1:
            # it's a scalar bias
            # need to find the value of Cout to form a new bias
            if conv_op.weight.val is None:
                return False
            Cout = conv_op.weight.val.shape[0]
            new_bias_value = np.broadcast_to(bias_value, (Cout, ))
        else:
            new_bias_value = bias_value
    else:
        # just need to update the existing bias array
        try:
            new_bias_value = old_bias_value + bias_value
        except:
            return False

    # create a new conv op with the new bias value, copying rest of the attributes
    out_name = add_op.outputs[0].name
    new_bias_var = mb.const(val=new_bias_value,
                            mode="file_value",
                            before_op=conv_op)

    conv_kargs = {"bias": new_bias_var, "name": out_name, "before_op": conv_op}

    for k, v in conv_op.inputs.items():
        if k == "bias":
            continue
        conv_kargs[k] = v

    if conv_op.op_type == "conv":
        x = mb.conv(**conv_kargs)
    else:
        x = mb.conv_transpose(**conv_kargs)

    add_op.enclosing_block.replace_uses_of_var_after_op(
        anchor_op=add_op, old_var=add_op.outputs[0], new_var=x)
    # Remove all the ops at once
    block.remove_ops([conv_op, add_op])
    return True
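The "sub" branch above relies on conv(x) - b being the same as adding a negated bias. A quick NumPy check with arbitrary values, independent of the pass itself:

 import numpy as np

 conv_out = np.array([[1.0, -2.0], [3.0, 4.0]], dtype=np.float32)  # arbitrary values
 bias = np.float32(3.0)
 # folding the subtraction into the conv bias just flips the bias sign
 np.testing.assert_allclose(conv_out - bias, conv_out + (-bias))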
Example #29
 def build(x):
     return mb.clamped_relu(x=x, alpha=2.0, beta=1.0)
Example #30
 def build(x):
     return [
         mb.softplus_parametric(x=x, alpha=alpha_val, beta=beta_val)
     ]