Example #1
def create_op(scope, op_type, inputs, outputs, attrs):
    kwargs = dict()

    for in_name, in_dup in Operator.get_op_inputs(op_type):
        if in_name in inputs:
            kwargs[in_name] = []
            if in_dup:
                sub_in = inputs[in_name]
                for sub_in_name, _ in sub_in:
                    var = scope.new_var(sub_in_name)
                    kwargs[in_name].append(sub_in_name)
            else:
                var = scope.new_var(in_name)
                kwargs[in_name].append(in_name)

    for out_name, out_dup in Operator.get_op_outputs(op_type):
        if out_name in outputs:
            kwargs[out_name] = []
            if out_dup:
                sub_out = outputs[out_name]
                for sub_out_name, _ in sub_out:
                    var = scope.new_var(sub_out_name)
                    kwargs[out_name].append(sub_out_name)
            else:
                var = scope.new_var(out_name)
                kwargs[out_name].append(out_name)

    for attr_name in Operator.get_op_attr_names(op_type):
        if attr_name in attrs:
            kwargs[attr_name] = attrs[attr_name]
    return Operator(op_type, **kwargs)
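
A minimal usage sketch of create_op, mirroring the call in Example #6 below. The op type ("scale"), the shapes, and the scale attribute are illustrative, and core and Operator are assumed to be importable from the legacy paddle.v2.framework bindings used throughout these examples:

import numpy as np

scope = core.Scope()
# create_op only looks at the dict keys to decide which inputs/outputs to
# wire up; the arrays themselves are consumed later by set_input (Example #13)
inputs = {"X": np.random.random((10, 10)).astype("float32")}
outputs = {"Out": np.zeros((10, 10)).astype("float32")}
attrs = {"scale": 3.2}
op = create_op(scope, "scale", inputs, outputs, attrs)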
Example #2
def fc(X, W, Y):
    ret_v = core.Net.create()

    ret_v.append_op(Operator("mul", X="X", Y="W", Out="pre_activation"))
    ret_v.append_op(Operator("sigmoid", X="pre_activation", Y=Y))
    ret_v.complete_add_op(True)
    return ret_v
Example #3
    def create_step_net(self):
        stepnet = core.Net.create()
        x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx")
        h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
        sum_op = Operator("add", X="Wx", Y="Uh", Out="sum")
        sig_op = Operator("sigmoid", X="sum", Y="h@alias")

        for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
            stepnet.append_op(op)
        stepnet.complete_add_op(True)
        self.rnnop.set_stepnet(stepnet)
Example #4
    def create_sub_net(self):
        truenet = core.Net.create()
        scale_op_t = Operator("scale", X='X', Out='Out', scale=2.)
        truenet.append_op(scale_op_t)
        truenet.complete_add_op(True)
        self.condop.set_truenet(truenet)

        falsenet = core.Net.create()
        scale_op_t = Operator("scale", X='X', Out='Out', scale=-2.)
        falsenet.append_op(scale_op_t)
        falsenet.complete_add_op(True)
        self.condop.set_falsenet(falsenet)
Example #5
def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
    """
    The fully connected layer.

    :param input: The name of the input variable.
    :type input: str
    :param size: The size of the fully connected layer.
    :param act: The name of the activation.
    :param param: The attribute of the learnable parameter, which can be used
                  to modify the initialization mean and std of the parameter.
    :param bias: The attribute of the bias. If set to False, this layer does
                 not have a bias.
    :param name: The name of this layer. If it is not set explicitly, a name
                 will be generated automatically.
    :return: The name of the output variable.
    """

    if name is None:
        name = "fc_%d" % uniq_id()
    if not isinstance(name, str):
        raise ValueError("The name of a layer should be a string.")

    input_dims = scope.find_var(input).get_tensor().get_dims()

    w_name = param or name + ".w"
    init_param(net=init_net, param_name=w_name, dims=[input_dims[1], size])
    sgd_optimizer(net=optimize_net, param_name=w_name, learning_rate=0.01)

    pre_activation = name + ".mul.out"
    scope.new_var(pre_activation)
    mul_op = Operator("mul", X=input, Y=w_name, Out=pre_activation)
    net.append_op(mul_op)

    # create bias variable if needed
    if bias:
        bias_name = name + ".b"
        init_param(net=init_net, param_name=bias_name, dims=[size])
        sgd_optimizer(
            net=optimize_net, param_name=bias_name, learning_rate=0.001)
        bias_out = name + ".rowwise_add.out"
        scope.new_var(bias_out)
        rowwise_append_op = Operator(
            "rowwise_add", X=pre_activation, b=bias_name, Out=bias_out)
        net.append_op(rowwise_append_op)
        pre_activation = bias_out

    activation_op = Operator(act, X=pre_activation, Y=name)
    net.append_op(activation_op)
    scope.new_var(name)
    net.infer_shape(scope)
    return name
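
A hedged sketch of how the layer helpers chain: each call returns the name of its output variable, which feeds the next layer. forward_net, the variable names "image" and "label", and the sizes are hypothetical; the input variable is assumed to already exist in the module-level scope (fc_layer reads its dims via scope.find_var), and cross_entropy_layer is the helper from Example #7 below:

# hypothetical usage; "image" must already hold a 2-D tensor in scope
hidden = fc_layer(net=forward_net, input="image", size=100, act="sigmoid")
predict = fc_layer(net=forward_net, input=hidden, size=10, act="softmax")
cost = cross_entropy_layer(net=forward_net, input=predict, label="label")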
Example #6
    def check_output_with_place(self, place):
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs,
                            op_attrs)
        if isinstance(place, core.GPUPlace) and not self.op.support_gpu():
            return
        set_input(self.scope, self.op, self.inputs, place)
        self.op.infer_shape(self.scope)
        ctx = core.DeviceContext.create(place)
        self.op.run(self.scope, ctx)

        for out_name, out_dup in Operator.get_op_outputs(self.op.type()):
            if out_dup:
                sub_out = self.outputs[out_name]
                for sub_out_name in sub_out:
                    actual = np.array(
                        self.scope.find_var(sub_out_name).get_tensor())
                    expect = sub_out[sub_out_name]
                    self.assertTrue(np.allclose(actual, expect, atol=1e-05),
                                    "output name: " + out_name + " has diff")
            else:
                actual = np.array(self.scope.find_var(out_name).get_tensor())
                expect = self.outputs[out_name]
                self.assertTrue(np.allclose(actual, expect, atol=1e-05),
                                "output name: " + out_name + " has diff")
Example #7
def cross_entropy_layer(net, input, label):
    cost_name = "cross_entropy_%d" % uniq_id()
    cross_entropy_op = Operator(
        "onehot_cross_entropy", X=input, label=label, Y=cost_name)
    net.append_op(cross_entropy_op)
    scope.new_var(cost_name)
    net.infer_shape(scope)
    return cost_name
Example #8
def sgd_optimizer(net, param_name, learning_rate=0.005):
    grad_name = grad_var_name(param_name)
    optimize_op = Operator(
        "sgd",
        param=param_name,
        grad=grad_name,
        param_out=param_name,
        learning_rate=learning_rate)
    net.append_op(optimize_op)
Example #9
def set_output_grad(scope, op, outputs, place):
    for out_name, out_dup in Operator.get_op_outputs(op.type()):
        if out_name in outputs:
            if out_dup:
                sub_out = outputs[out_name]
                for sub_out_name, _ in sub_out:
                    out_tensor = scope.find_var(sub_out_name).get_tensor()
                    grad_tensor = scope.new_var(
                        grad_var_name(sub_out_name)).get_tensor()
                    grad_tensor.set_dims(out_tensor.shape())
                    data = np.ones(out_tensor.shape(), dtype=np.float32)
                    grad_tensor.set(data, place)
            else:
                out_tensor = scope.find_var(out_name).get_tensor()
                grad_tensor = scope.new_var(
                    grad_var_name(out_name)).get_tensor()
                grad_tensor.set_dims(out_tensor.shape())
                data = np.ones(out_tensor.shape(), dtype=np.float32)
                grad_tensor.set(data, place)
Example #10
    def test_net_all(self):
        net = core.Net.create()
        op1 = Operator("add", X="X", Y="Y", Out="Out")
        net.append_op(op1)

        net2 = core.Net.create()
        net2.append_op(fc(X="X", W="w", Y="fc.out"))
        net2.complete_add_op(True)
        net.append_op(net2)
        net.complete_add_op(True)

        expected = '''
Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}.
    Op(add), inputs:{X[X], Y[Y]}, outputs:{Out[Out]}.
    Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
        Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
            Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}.
            Op(sigmoid), inputs:{X[pre_activation]}, outputs:{Y[fc.out]}.
'''
        self.assertEqual(expected, "\n" + str(net))
Example #11
    def uniform_random_test(self, place):
        scope = core.Scope()
        scope.new_var("X").get_tensor()

        op = Operator("uniform_random",
                      Out="X",
                      dims=[1000, 784],
                      min=-5.0,
                      max=10.0,
                      seed=10)

        op.infer_shape(scope)
        ctx = core.DeviceContext.create(place)
        op.run(scope, ctx)
        tensor = numpy.array(scope.find_var("X").get_tensor())
        self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1)
Example #12
    def gaussian_random_test(self, place):
        scope = core.Scope()
        scope.new_var("Out").get_tensor()

        op = Operator("gaussian_random",
                      Out="Out",
                      dims=[1000, 784],
                      mean=.0,
                      std=1.,
                      seed=10)

        op.infer_shape(scope)
        context = core.DeviceContext.create(place)
        op.run(scope, context)
        tensor = numpy.array(scope.find_var("Out").get_tensor())
        self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1)
        self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1)
Example #13
def set_input(scope, op, inputs, place):
    for in_name, in_dup in Operator.get_op_inputs(op.type()):
        if in_name in inputs:
            if in_dup:
                sub_in = inputs[in_name]
                for sub_in_name, sub_in_val in sub_in:
                    var = scope.find_var(sub_in_name)
                    tensor = var.get_tensor()
                    sub_in_array = sub_in_val[0] \
                        if isinstance(sub_in_val, tuple) else sub_in_val
                    tensor.set_dims(sub_in_array.shape)
                    tensor.set(sub_in_array, place)
                    if isinstance(sub_in_val, tuple):
                        tensor.set_lod(sub_in_val[1])
            else:
                var = scope.find_var(in_name)
                tensor = var.get_tensor()
                in_val = inputs[in_name]
                in_array = in_val[0] if isinstance(in_val, tuple) else in_val
                tensor.set_dims(in_array.shape)
                tensor.set(in_array, place)
                if isinstance(in_val, tuple):
                    tensor.set_lod(in_val[1])
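
For reference, a sketch of the inputs dict that set_input accepts: a plain numpy array for an ordinary input, an (array, lod) tuple when a LoD should be attached via set_lod, and a list of (sub-name, array) pairs for a duplicable input. All names, shapes, and the LoD values below are purely illustrative:

import numpy as np

inputs = {
    # ordinary input: just the array
    "X": np.random.random((4, 3)).astype("float32"),
    # LoD input: (array, lod) tuple, so tensor.set_lod() is also called
    "Label": (np.random.random((4, 1)).astype("float32"), [[0, 2, 4]]),
    # duplicable input: list of (sub_in_name, array) pairs
    "W": [("w0", np.ones((3, 3)).astype("float32")),
          ("w1", np.ones((3, 3)).astype("float32"))],
}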
Example #14
def init_param(net, param_name, dims):
    scope.new_var(param_name)
    op = Operator(
        "uniform_random", Out=param_name, dims=dims, min=-0.5, max=0.5, seed=10)
    op.infer_shape(scope)
    net.append_op(op)
Example #15
        def test_all(self):
            scope = core.Scope()
            kwargs = dict()
            places = [core.CPUPlace()]
            if core.is_compile_gpu():
                places.append(core.GPUPlace(0))

            for place in places:
                for in_name in Operator.get_op_input_names(self.type):
                    if hasattr(self, "inputs") and in_name in self.inputs:
                        kwargs[in_name] = in_name
                        var = scope.new_var(in_name).get_tensor()
                        arr = self.inputs[in_name]
                        var.set_dims(arr.shape)
                        var.set(arr, place)
                    else:
                        kwargs[in_name] = "@EMPTY@"

                for out_name in Operator.get_op_output_names(self.type):
                    if not hasattr(self, "outputs"):
                        raise ValueError(
                            "The test op must set self.outputs dict.")
                    if out_name not in self.outputs:
                        raise ValueError(
                            "The %s is not in self.outputs dict." % (out_name))
                    kwargs[out_name] = out_name
                    scope.new_var(out_name).get_tensor()

                for attr_name in Operator.get_op_attr_names(self.type):
                    if hasattr(self, "attrs") and attr_name in self.attrs:
                        kwargs[attr_name] = self.attrs[attr_name]

                op = Operator(self.type, **kwargs)
                if isinstance(place, core.GPUPlace) and not op.support_gpu():
                    return

                op.infer_shape(scope)

                ctx = core.DeviceContext.create(place)
                op.run(scope, ctx)

                for out_name in Operator.get_op_output_names(self.type):
                    actual = numpy.array(scope.find_var(out_name).get_tensor())
                    expect = self.outputs[out_name]
                    self.assertTrue(numpy.allclose(actual, expect, atol=1e-05),
                                    "output name: " + out_name + " has diff")
Example #16
    def test_normal(self):
        op = Operator("scale", X="X", Out="Out", scale=3.2)
        self.check_grad(op,
                        {"X": np.random.random((10, 10)).astype("float32")},
                        set("X"), "Out")