Example #1
    def do_tensor_scatter_nd_add(params_blob, indices_blob, updates_blob):
        with flow.scope.placement(device_type, "0:0"):
            params_var = flow.get_variable(
                "params",
                shape=params_blob.shape,
                dtype=flow.float32,
                initializer=flow.constant_initializer(0),
            )
            updates_var = flow.get_variable(
                "updates",
                shape=updates_blob.shape,
                dtype=flow.float32,
                initializer=flow.constant_initializer(0),
            )
            params_var = flow.cast_to_current_logical_view(params_var)
            params_blob = flow.cast_to_current_logical_view(params_blob)
            updates_blob = flow.cast_to_current_logical_view(updates_blob)
            updates_var = flow.cast_to_current_logical_view(updates_var)
            params_var = params_var + params_blob
            updates_var = updates_var + updates_blob
            out = flow.tensor_scatter_nd_add(params_var, indices_blob, updates_var)
            flow.losses.add_loss(out)

        flow.watch_diff(params_var, params_grad_watcher)
        flow.watch_diff(updates_var, updates_grad_watcher)
        return out
Example #2
    def do_tensor_scatter_nd_add(params_blob, indices_blob, updates_blob):
        with flow.scope.placement(device_type, "0:0"):
            params_var = flow.get_variable(
                "params",
                shape=params_blob.shape,
                dtype=flow.float32,
                initializer=flow.constant_initializer(0),
            )
            updates_var = flow.get_variable(
                "updates",
                shape=updates_blob.shape,
                dtype=flow.float32,
                initializer=flow.constant_initializer(0),
            )
            params_var = flow.cast_to_current_logical_view(params_var)
            params_blob = flow.cast_to_current_logical_view(params_blob)
            updates_blob = flow.cast_to_current_logical_view(updates_blob)
            updates_var = flow.cast_to_current_logical_view(updates_var)
            params_var = params_var + params_blob
            updates_var = updates_var + updates_blob
            out = flow.tensor_scatter_nd_add(params_var, indices_blob, updates_var)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
            ).minimize(out)

        flow.watch_diff(params_var, params_grad_watcher)
        flow.watch_diff(updates_var, updates_grad_watcher)
        return out
Example #3
        def do_where(condition, x, y):
            with flow.scope.placement(device_type, "0:0"):
                x_var = flow.get_variable(
                    "x",
                    shape=x.shape,
                    dtype=flow.float,
                    initializer=flow.constant_initializer(0),
                )
                x_var = flow.cast_to_current_logical_view(x_var)
                x_var = x_var + x
                y_var = flow.get_variable(
                    "y",
                    shape=y.shape,
                    dtype=flow.float,
                    initializer=flow.constant_initializer(0),
                )
                y_var = flow.cast_to_current_logical_view(y_var)
                y_var = y_var + y

            z = flow.where(condition, x_var, y_var)

            with flow.scope.placement(device_type, "0:0"):
                flow.optimizer.SGD(
                    flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
                ).minimize(z)

            flow.watch_diff(x_var, dz_dx_watcher)
            flow.watch_diff(y_var, dz_dy_watcher)
            return z
Example #4
    def ConcatJob():
        with flow.scope.placement(device_type, "0:0"):
            x = flow.get_variable(
                "x",
                shape=x_shape,
                dtype=type_name_to_flow_type[dtype],
                initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
                trainable=True,
            )
            y = flow.get_variable(
                "y",
                shape=y_shape,
                dtype=type_name_to_flow_type[dtype],
                initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
                trainable=True,
            )
            x = flow.cast_to_current_logical_view(x)
            y = flow.cast_to_current_logical_view(y)
            loss = flow.concat([x, y], axis)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)

            flow.watch(x, test_global_storage.Setter("x"))
            flow.watch_diff(x, test_global_storage.Setter("x_diff"))
            flow.watch(y, test_global_storage.Setter("y"))
            flow.watch_diff(y, test_global_storage.Setter("y_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))

            return loss
Example #5
        def do_where(condition, x, y):
            with flow.scope.placement(device_type, "0:0"):
                x_var = flow.get_variable(
                    "x",
                    shape=x.shape,
                    dtype=flow.float,
                    initializer=flow.constant_initializer(0),
                )
                x_var = flow.cast_to_current_logical_view(x_var)
                x_var = x_var + x
                y_var = flow.get_variable(
                    "y",
                    shape=y.shape,
                    dtype=flow.float,
                    initializer=flow.constant_initializer(0),
                )
                y_var = flow.cast_to_current_logical_view(y_var)
                y_var = y_var + y

            z = flow.where(condition, x_var, y_var)

            with flow.scope.placement(device_type, "0:0"):
                flow.losses.add_loss(z)

            flow.watch_diff(x_var, dz_dx_watcher)
            flow.watch_diff(y_var, dz_dy_watcher)
            return z
Example #6
 def foo_job(input_def: oft.Numpy.Placeholder(shape=(2, 5))):
     var = flow.get_variable(
         name="var",
         shape=(2, 5),
         dtype=flow.float,
         initializer=flow.ones_initializer(),
     )
     input_def = flow.cast_to_current_logical_view(input_def)
     var = flow.cast_to_current_logical_view(var)
     output = var + input_def
     return output
Example #7
    def diag_job(
        input_tensor: tp.Numpy.Placeholder(shape=input_shape, dtype=flow.float),
    ) -> tp.Numpy:
        input_var = flow.get_variable(
            "input_tensor",
            shape=input_shape,
            dtype=flow.float,
            initializer=flow.zeros_initializer(),
            trainable=True,
        )

        input_tensor = input_tensor + input_var
        input_tensor = flow.cast_to_current_logical_view(input_tensor)
        input_tensor = flow.cast(input_tensor, type_name_to_flow_type[dtype])
        output = flow.diag(input_tensor, dim)
        if output.dtype in (flow.int8, flow.int32, flow.int64):
            output = flow.cast(output, flow.float)
        flow.optimizer.Adam(
            flow.optimizer.PiecewiseConstantScheduler([], [1e-4])
        ).minimize(output)

        flow.watch(input_tensor, test_global_storage.Setter("x"))
        flow.watch_diff(input_tensor, test_global_storage.Setter("x_diff"))
        flow.watch(output, test_global_storage.Setter("output"))
        flow.watch_diff(output, test_global_storage.Setter("output_diff"))

        return output
Example #8
 def pooling_job(x: tensor_def(x_shape, dtype=dtype)):
     v = flow.get_variable(
         "x",
         shape=x_shape,
         dtype=dtype,
         initializer=flow.constant_initializer(0),
         trainable=True,
     )
     v = flow.cast_to_current_logical_view(v)
     flow.watch_diff(v, assert_grad)
     x += v
     with flow.scope.placement(device_type, "0:0"):
         pooling_f = None
         if pooling_type == "AVG":
             pooling_f = getattr(flow.nn, "avg_pool{}d".format(dim))
         elif pooling_type == "MAX":
             pooling_f = getattr(flow.nn, "max_pool{}d".format(dim))
         else:
             raise ValueError("pooling_type must be AVG or MAX")
         y = pooling_f(
             x,
             ksize=ksize,
             strides=strides,
             padding=padding,
             data_format=data_format,
         )
     flow.optimizer.SGD(
         flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
     ).minimize(y)
     return y
Example #9
        def gather_fn(
            params_def: oft.Numpy.Placeholder(input.shape, dtype=flow.float32),
            indices_def: oft.Numpy.Placeholder(index.shape, dtype=index_type),
        ) -> oft.Numpy:
            with flow.scope.placement(device_type, "0:0"):
                x_var = flow.get_variable(
                    "input",
                    shape=input.shape,
                    dtype=flow.float32,
                    initializer=flow.constant_initializer(0),
                )
                x_var = flow.cast_to_current_logical_view(x_var)
                x = x_var + params_def
                x_f16 = flow.cast(x, flow.float16)

            y_f16 = flow.dim_gather(x_f16, dim, indices_def)
            x_f32 = flow.cast(x, flow.float32)
            y_f32 = flow.cast(y_f16, flow.float32)

            y = flow.dim_gather(x, dim, indices_def)

            with flow.scope.placement(device_type, "0:0"):
                flow.optimizer.SGD(
                    flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
                ).minimize(y_f32)

            flow.watch_diff(x_f32, _compare_diff)
            return y_f32
Example #10
    def hybrid_concat_job(
        input_0_def: oft.ListNumpy.Placeholder(shape=static_shape, dtype=flow.float),
        input_1_def: oft.ListNumpy.Placeholder(shape=static_shape, dtype=flow.float),
    ):
        var = flow.get_variable(
            "var",
            shape=static_shape,
            dtype=flow.float,
            initializer=flow.random_uniform_initializer(),
            trainable=True,
        )
        constant = flow.constant(1.0, dtype=flow.float, shape=rand_sub_shape)
        inputs = [
            flow.cast_to_current_logical_view(blob)
            for blob in [var, input_0_def, input_1_def, constant]
        ]
        concated = flow.concat(inputs, axis=axis, max_dim_size=max_dim_size)
        if verbose:
            print("concated static shape:", concated.shape)

        flow.optimizer.SGD(
            flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
        ).minimize(concated)
        flow.watch_diff(var, compare_var_diff)

        if max_dim_size is None:
            test_case.assertTrue(
                concated.shape[axis] == (static_shape[axis] * 3 + rand_sub_shape[axis])
            )
        else:
            test_case.assertTrue(concated.shape[axis] == max_dim_size)

        return var, concated
Example #11
    def dynamic_concat_job(
        input_0_def: oft.ListNumpy.Placeholder(
            shape=input_static_shape, dtype=flow.float
        ),
        input_1_def: oft.ListNumpy.Placeholder(
            shape=input_static_shape, dtype=flow.float
        ),
    ):
        var_0 = flow.get_variable(
            "Var0",
            shape=(1,),
            dtype=flow.float,
            initializer=flow.constant_initializer(value=1, dtype=flow.float),
            trainable=True,
        )
        var_1 = flow.get_variable(
            "Var1",
            shape=(1,),
            dtype=flow.float,
            initializer=flow.constant_initializer(value=1, dtype=flow.float),
            trainable=True,
        )
        var_0 = flow.cast_to_current_logical_view(var_0)
        var_1 = flow.cast_to_current_logical_view(var_1)
        input_0_def = flow.cast_to_current_logical_view(input_0_def)
        input_1_def = flow.cast_to_current_logical_view(input_1_def)
        if callable(watch_cb):
            flow.watch(var_0, watch_cb)
            flow.watch(var_1, watch_cb)
            flow.watch(flow.identity(input_0_def), watch_cb)
            flow.watch(flow.identity(input_1_def), watch_cb)

        var_0 = var_0 * input_0_def
        var_1 = var_1 * input_1_def
        if callable(watch_cb):
            flow.watch(var_0, watch_cb)
            flow.watch(var_1, watch_cb)

        result = flow.concat(
            [var_0, var_1], axis=axis, max_dim_size=input_static_shape[axis]
        )
        flow.optimizer.SGD(
            flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
        ).minimize(result)
        flow.watch_diff(var_0, make_watch_diff_cb(0))
        flow.watch_diff(var_1, make_watch_diff_cb(1))
        return result
Example #12
    def DynamicConvJob(x: oft.ListNumpy.Placeholder((10, 3, 100, 100))):
        with flow.scope.placement(device_type, "0:0"):
            x_var = flow.get_variable(
                name="v1",
                shape=(1,),
                dtype=flow.float,
                initializer=flow.zeros_initializer(),
            )
            x_var = flow.cast_to_current_logical_view(x_var)
            x += x_var
            if data_format == "NCHW":
                weight_shape = (filters, x_shape[1] // groups, kernel_size,
                                kernel_size)
            else:
                weight_shape = (filters, kernel_size, kernel_size,
                                x_shape[3] // groups)
            weight = flow.get_variable(
                "conv-weight",
                shape=weight_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
            )
            weight = flow.cast_to_current_logical_view(weight)
            loss = flow.nn.conv2d(
                x,
                weight,
                strides=[stride, stride],
                padding=of_padding,
                data_format=data_format,
                dilations=[1, 1],
                groups=groups,
            )
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)

            flow.watch(x, global_storage_setter("x"))
            flow.watch_diff(x, global_storage_setter("x_diff"))
            flow.watch(weight, global_storage_setter("weight"))
            flow.watch_diff(weight, global_storage_setter("weight_diff"))
            flow.watch(loss, global_storage_setter("loss"))
            flow.watch_diff(loss, global_storage_setter("loss_diff"))

            return loss
Example #13
 def do_gather_nd(x, index):
     x_var = flow.get_variable(
         "params", shape=(1,), dtype=x_dtype, initializer=flow.zeros_initializer(),
     )
     x = x + flow.cast_to_current_logical_view(x_var)
     y = flow.gather_nd(x, index)
     if need_grad:
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
         ).minimize(y)
         if callable(comp_diff_fn):
             flow.watch_diff(x, comp_diff_fn)
     return y
Example #14
 def do_gather(x_blob, i_blob):
     with flow.scope.placement(device_type, "0:0"):
         x = flow.get_variable(
             "params",
             shape=params.shape,
             dtype=flow.float32,
             initializer=flow.constant_initializer(0),
         )
         x = flow.cast_to_current_logical_view(x)
         x = x + x_blob
         y = flow.gather(x, i_blob, axis=axis, batch_dims=batch_dims)
         flow.losses.add_loss(y)
     flow.watch_diff(x, compare_fn)
     return y
Example #15
 def do_scatter_nd(indices_blob, updates_blob):
     with flow.scope.placement(device_type, "0:0"):
         x = flow.get_variable(
             "updates",
             shape=updates.shape,
             dtype=flow.float32,
             initializer=flow.constant_initializer(0),
         )
         x = flow.cast_to_current_logical_view(x)
         x = x + updates_blob
         y = flow.scatter_nd(indices_blob, x, shape)
         flow.losses.add_loss(y)
     flow.watch_diff(x, compare_fn)
     return y
Example #16
 def DynamicReshapeJob(x: oft.ListNumpy.Placeholder(data_shape)):
     reshape_out1 = flow.reshape(x, (-1, 20))
     my_model = flow.get_variable(
         "my_model",
         shape=(20, 32),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
         trainable=True,
     )
     my_model = flow.cast_to_current_logical_view(my_model)
     mm_out = flow.matmul(reshape_out1, my_model)
     reshape_out2 = flow.reshape(mm_out, (-1, 8, 4))
     flow.losses.add_loss(reshape_out2)
     return reshape_out1
Example #17
        def clip(values_blob):
            with flow.scope.placement(device_type, "0:0"):
                x = flow.get_variable(
                    "values",
                    shape=values.shape,
                    dtype=data_type,
                    initializer=flow.constant_initializer(0),
                )
                x = flow.cast_to_current_logical_view(x)
                x = x + values_blob
                y = flow.clip_by_value(x, min, max)
                flow.losses.add_loss(y)

            flow.watch_diff(x, grad_cb)
            return y
Example #18
    def oneflow_Xmum(
        of_input_1: tp.ListNumpy.Placeholder(shape=data_shape),
        of_input_2: tp.ListNumpy.Placeholder(shape=data_shape),
    ) -> tp.ListNumpy:
        with flow.scope.placement(device_type, "0:0"):
            v1 = flow.get_variable(
                shape=(1,),
                dtype=flow.float32,
                initializer=flow.zeros_initializer(),
                name="x1_var",
            )
            v1 = flow.cast_to_current_logical_view(v1)
            x1_var = of_input_1 + v1
            v2 = flow.get_variable(
                shape=(1,),
                dtype=flow.float32,
                initializer=flow.zeros_initializer(),
                name="x2_var",
            )
            v2 = flow.cast_to_current_logical_view(v2)
            x2_var = of_input_2 + v2

        flow.watch_diff(x1_var, assert_prediction_grad)  # only compare input1's grad

        if compare_type == "maximum":
            of_Xmum_out = flow.math.maximum(x1_var, x2_var)
        elif compare_type == "minimum":
            of_Xmum_out = flow.math.minimum(x1_var, x2_var)

        with flow.scope.placement(device_type, "0:0"):
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
            ).minimize(of_Xmum_out)

        return of_Xmum_out
Example #19
 def do_gather(x_blob, i_blob):
     with flow.scope.placement(device_type, "0:0"):
         x = flow.get_variable(
             "params",
             shape=params.shape,
             dtype=flow.float32,
             initializer=flow.constant_initializer(0),
         )
         x = flow.cast_to_current_logical_view(x)
         x = x + x_blob
         y = flow.gather(x, i_blob, axis=axis, batch_dims=batch_dims)
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
         ).minimize(y)
     flow.watch_diff(x, compare_fn)
     return y
Example #20
 def do_scatter_nd(indices_blob, updates_blob):
     with flow.scope.placement(device_type, "0:0"):
         x = flow.get_variable(
             "updates",
             shape=updates.shape,
             dtype=flow.float32,
             initializer=flow.constant_initializer(0),
         )
         x = flow.cast_to_current_logical_view(x)
         x = x + updates_blob
         y = flow.scatter_nd(indices_blob, x, shape)
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
         ).minimize(y)
     flow.watch_diff(x, compare_fn)
     return y
Example #21
 def DynamicReshapeJob(x: oft.ListNumpy.Placeholder(data_shape)):
     reshape_out1 = flow.reshape(x, (-1, 20))
     my_model = flow.get_variable(
         "my_model",
         shape=(20, 32),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
         trainable=True,
     )
     my_model = flow.cast_to_current_logical_view(my_model)
     mm_out = flow.matmul(reshape_out1, my_model)
     reshape_out2 = flow.reshape(mm_out, (-1, 8, 4))
     flow.optimizer.SGD(
         flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
     ).minimize(reshape_out2)
     return reshape_out1
Example #22
        def clip(values_blob):
            with flow.scope.placement(device_type, "0:0"):
                x = flow.get_variable(
                    "values",
                    shape=values.shape,
                    dtype=data_type,
                    initializer=flow.constant_initializer(0),
                )
                x = flow.cast_to_current_logical_view(x)
                x = x + values_blob
                y = flow.clip_by_value(x, min, max)
                flow.optimizer.SGD(
                    flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
                ).minimize(y)

            flow.watch_diff(x, grad_cb)
            return y
Example #23
def reshape(
    x: remote_blob_util.BlobDef, shape: Sequence[int], name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    r"""Reshapes a blob.

    Args:
        x: A `Blob`.
        shape: Shape of the output blob. At most one dimension may be -1,
            in which case it is inferred from the total element count of `x`.
        name: A name for the operation (optional).

    Returns:
        A `Blob` with the same data type as `x`.
    """
    x = flow.cast_to_current_logical_view(x)
    assert isinstance(shape, (tuple, list))
    shape = list(shape)
    assert all(dim == -1 or dim > 0 for dim in shape)
    assert shape.count(-1) <= 1
    if not x.is_dynamic:
        if name is None:
            name = id_util.UniqueStr("Reshape_")
        return (
            flow.user_op_builder(name)
            .Op("reshape")
            .Input("in", [x])
            .Output("out")
            .Attr("shape", infer_shape(x, shape))
            .Build()
            .InferAndTryRun()
            .RemoteBlobList()[0]
        )
    else:
        op_conf = op_conf_util.OperatorConf()
        setattr(
            op_conf,
            "name",
            name if name is not None else id_util.UniqueStr("DynamicReshape_"),
        )
        setattr(op_conf.dynamic_reshape_conf, "in", x.unique_name)
        op_conf.dynamic_reshape_conf.shape.dim.extend(list(shape))
        setattr(op_conf.dynamic_reshape_conf, "out", "out")
        interpret_util.Forward(op_conf)
        lbi = logical_blob_id_util.LogicalBlobId()
        lbi.op_name = op_conf.name
        lbi.blob_name = "out"
        return remote_blob_util.RemoteBlob(lbi)
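A minimal usage sketch for the `reshape` helper above, assuming OneFlow's legacy lazy-graph API (the `flow.global_function` decorator and `oneflow.typing` placeholders; exact import paths vary across versions):

    # Hypothetical usage sketch; import paths assume a legacy OneFlow
    # release that ships flow.global_function and oneflow.typing.
    import numpy as np
    import oneflow as flow
    import oneflow.typing as tp

    @flow.global_function()
    def reshape_job(
        x: tp.Numpy.Placeholder(shape=(4, 5), dtype=flow.float)
    ) -> tp.Numpy:
        # -1 leaves one dimension to be inferred: (4, 5) -> (2, 10)
        return flow.reshape(x, (-1, 10))

    y = reshape_job(np.ones((4, 5), dtype=np.float32))
    print(y.shape)  # (2, 10)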
Example #24
    def gather_nd_fn(
        params_def: oft.ListNumpy.Placeholder(static_params_shape, dtype=flow.float),
        indices_def: oft.ListNumpy.Placeholder(indices.shape, dtype=flow.int32),
    ):
        with flow.scope.placement("gpu", "0:0"):
            one_var = flow.get_variable(
                "one",
                shape=(1,),
                dtype=flow.float32,
                initializer=flow.constant_initializer(1),
            )
            one_var = flow.cast_to_current_logical_view(one_var)
            params_var = params_def * one_var
            y = flow.gather_nd(params_var, indices_def)
            flow.losses.add_loss(y)

        flow.watch_diff(params_var, compare_fn)
        return y
Example #25
    def gather_nd_fn(
        params_def: oft.ListNumpy.Placeholder(static_params_shape, dtype=flow.float),
        indices_def: oft.ListNumpy.Placeholder(indices.shape, dtype=flow.int32),
    ):
        with flow.scope.placement("gpu", "0:0"):
            one_var = flow.get_variable(
                "one",
                shape=(1,),
                dtype=flow.float32,
                initializer=flow.constant_initializer(1),
            )
            one_var = flow.cast_to_current_logical_view(one_var)
            params_var = params_def * one_var
            y = flow.gather_nd(params_var, indices_def)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
            ).minimize(y)

        flow.watch_diff(params_var, compare_fn)
        return y
Example #26
        def op_function(x: tp.Numpy.Placeholder(input.shape, dtype=flow.float32)):
            with flow.scope.placement(device_type, "0:0"):
                x_var = flow.get_variable(
                    name="input",
                    shape=input.shape,
                    dtype=flow.float32,
                    initializer=flow.constant_initializer(0),
                )
                x_var = flow.cast_to_current_logical_view(x_var)
                input_x = x_var + x
                x_fp32 = flow.cast(input_x, flow.float32)
                x_fp16 = flow.cast(input_x, dtype=flow.float16)
                y_fp16 = flow.reflection_pad2d(x_fp16, padding)
                y_fp32 = flow.cast(y_fp16, dtype=flow.float32)
                flow.optimizer.SGD(
                    flow.optimizer.PiecewiseConstantScheduler([], [0]), momentum=0
                ).minimize(y_fp32)

            flow.watch_diff(x_fp32, _compare_diff)
            return y_fp32
Example #27
    def DynamicDimGatherJob(
        params_def: oft.ListNumpy.Placeholder(datashape, dtype=flow.float32),
        index_def: oft.ListNumpy.Placeholder(datashape, dtype=flow.int32),
    ) -> oft.ListNumpy:
        x_var = flow.get_variable(
            "input",
            shape=(1,),
            dtype=flow.float32,
            initializer=flow.constant_initializer(0),
        )
        x_var = flow.cast_to_current_logical_view(x_var)
        x = x_var + params_def

        y = flow.dim_gather(x, sample["dim"], index_def)

        flow.optimizer.SGD(
            flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
        ).minimize(y)

        flow.watch_diff(x, _compare_diff)
        return y