Example #1
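 # flow.where backward check: zero-initialized variables are added to x and y so both branches
 # carry gradients, and watch_diff hands dz/dx and dz/dy to the checker callbacks.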
 def do_where(condition, x, y):
     with flow.scope.placement(device_type, "0:0"):
         x_var = flow.get_variable(
             "x",
             shape=x.shape,
             dtype=flow.float,
             initializer=flow.constant_initializer(0),
         )
         x_var = flow.cast_to_current_logical_view(x_var)
         x_var = x_var + x
         y_var = flow.get_variable(
             "y",
             shape=y.shape,
             dtype=flow.float,
             initializer=flow.constant_initializer(0),
         )
         y_var = flow.cast_to_current_logical_view(y_var)
         y_var = y_var + y
     z = flow.where(condition, x_var, y_var)
     with flow.scope.placement(device_type, "0:0"):
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
         ).minimize(z)
     flow.watch_diff(x_var, dz_dx_watcher)
     flow.watch_diff(y_var, dz_dy_watcher)
     return z
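These listings only define the graph-building helpers from OneFlow's legacy lazy-mode tests; on their own they do not run. Below is a minimal sketch of the surrounding harness, assuming the pre-1.0 single-client API; the import path, placeholder shapes, and the names device_type, dz_dx_watcher, and dz_dy_watcher come from the enclosing test and are assumptions here, not part of the snippet above.
 import numpy as np
 import oneflow as flow  # on OneFlow 0.5.x: import oneflow.compatible.single_client as flow
 import oneflow.typing as tp

 # flow.config.gpu_device_num(1)  # needed when device_type == "gpu"
 func_config = flow.FunctionConfig()
 func_config.default_data_type(flow.float)
 # Mirrored view allows ListNumpy placeholders and per-device dynamic shapes.
 func_config.default_logical_view(flow.scope.mirrored_view())

 @flow.global_function(type="train", function_config=func_config)
 def where_job(
     condition: tp.ListNumpy.Placeholder((5,), dtype=flow.int32),
     x: tp.ListNumpy.Placeholder((5,), dtype=flow.float),
     y: tp.ListNumpy.Placeholder((5,), dtype=flow.float),
 ) -> tp.ListNumpy:
     # do_where is the helper from Example #1; it builds the graph and registers the watchers.
     return do_where(condition, x, y)

 # Mirrored (ListNumpy) jobs take one numpy array per device and return one per device.
 z = where_job(
     [np.random.randint(0, 2, size=(5,)).astype(np.int32)],
     [np.random.rand(5).astype(np.float32)],
     [np.random.rand(5).astype(np.float32)],
 ).get()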
Example #2
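 # flow.concat training job: two trainable random-uniform variables are concatenated along
 # `axis`, and watch/watch_diff stash values and gradients in test_global_storage.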
 def ConcatJob():
     with flow.scope.placement(device_type, "0:0"):
         x = flow.get_variable(
             "x",
             shape=x_shape,
             dtype=type_name_to_flow_type[dtype],
             initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
             trainable=True,
         )
         y = flow.get_variable(
             "y",
             shape=y_shape,
             dtype=type_name_to_flow_type[dtype],
             initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
             trainable=True,
         )
         x = flow.cast_to_current_logical_view(x)
         y = flow.cast_to_current_logical_view(y)
         loss = flow.concat([x, y], axis)
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [0.0001]), momentum=0
         ).minimize(loss)
         flow.watch(x, test_global_storage.Setter("x"))
         flow.watch_diff(x, test_global_storage.Setter("x_diff"))
         flow.watch(y, test_global_storage.Setter("y"))
         flow.watch_diff(y, test_global_storage.Setter("y_diff"))
         flow.watch(loss, test_global_storage.Setter("loss"))
         flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))
         return loss
Example #3
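 # flow.tensor_scatter_nd_add backward check: variables added to the params and updates blobs
 # make both differentiable, and watch_diff forwards their gradients to the checker callbacks.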
 def do_tensor_scatter_nd_add(params_blob, indices_blob, updates_blob):
     with flow.scope.placement(device_type, "0:0"):
         params_var = flow.get_variable(
             "params",
             shape=params_blob.shape,
             dtype=flow.float32,
             initializer=flow.constant_initializer(0),
         )
         updates_var = flow.get_variable(
             "updates",
             shape=updates_blob.shape,
             dtype=flow.float32,
             initializer=flow.constant_initializer(0),
         )
         params_var = flow.cast_to_current_logical_view(params_var)
         params_blob = flow.cast_to_current_logical_view(params_blob)
         updates_blob = flow.cast_to_current_logical_view(updates_blob)
         updates_var = flow.cast_to_current_logical_view(updates_var)
         params_var = params_var + params_blob
         updates_var = updates_var + updates_blob
         out = flow.tensor_scatter_nd_add(params_var, indices_blob, updates_var)
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
         ).minimize(out)
     flow.watch_diff(params_var, params_grad_watcher)
     flow.watch_diff(updates_var, updates_grad_watcher)
     return out
Example #4
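 # Shared job for flow.math.maximum / flow.math.minimum: a (1,)-shaped variable is broadcast onto
 # each dynamic input, `compare_type` selects the op, and watch_diff checks the gradient of x1_var.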
 def oneflow_Xmum(
     of_input_1: tp.ListNumpy.Placeholder(shape=data_shape),
     of_input_2: tp.ListNumpy.Placeholder(shape=data_shape),
 ) -> tp.ListNumpy:
     with flow.scope.placement(device_type, "0:0"):
         v1 = flow.get_variable(
             shape=(1, ),
             dtype=flow.float32,
             initializer=flow.zeros_initializer(),
             name="x1_var",
         )
         v1 = flow.cast_to_current_logical_view(v1)
         x1_var = of_input_1 + v1
         v2 = flow.get_variable(
             shape=(1, ),
             dtype=flow.float32,
             initializer=flow.zeros_initializer(),
             name="x2_var",
         )
         v2 = flow.cast_to_current_logical_view(v2)
         x2_var = of_input_2 + v2
     flow.watch_diff(x1_var, assert_prediction_grad)
     if compare_type == "maximum":
         of_Xmum_out = flow.math.maximum(x1_var, x2_var)
     elif compare_type == "minimum":
         of_Xmum_out = flow.math.minimum(x1_var, x2_var)
     with flow.scope.placement(device_type, "0:0"):
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
         ).minimize(of_Xmum_out)
     return of_Xmum_out
Example #5
 def foo_job(input_def: oft.Numpy.Placeholder(shape=(2, 5))):
     var = flow.get_variable(
         name="var",
         shape=(2, 5),
         dtype=flow.float,
         initializer=flow.ones_initializer(),
     )
     input_def = flow.cast_to_current_logical_view(input_def)
     var = flow.cast_to_current_logical_view(var)
     output = var + input_def
     return output
Example #6
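 # Hybrid concat: a trainable variable, two dynamic (ListNumpy) inputs, and a constant are
 # concatenated along `axis` with an optional max_dim_size; the assertions check the static shape.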
 def hybrid_concat_job(
     input_0_def: oft.ListNumpy.Placeholder(shape=static_shape, dtype=flow.float),
     input_1_def: oft.ListNumpy.Placeholder(shape=static_shape, dtype=flow.float),
 ):
     var = flow.get_variable(
         "var",
         shape=static_shape,
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
         trainable=True,
     )
     constant = flow.constant(1.0, dtype=flow.float, shape=rand_sub_shape)
     inputs = [
         flow.cast_to_current_logical_view(input)
         for input in [var, input_0_def, input_1_def, constant]
     ]
     concated = flow.concat(inputs, axis=axis, max_dim_size=max_dim_size)
     if verbose:
         print("concated static shape:", concated.shape)
     flow.optimizer.SGD(
         flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
     ).minimize(concated)
     flow.watch_diff(var, compare_var_diff)
     if max_dim_size is None:
         test_case.assertTrue(
             concated.shape[axis] == static_shape[axis] * 3 + rand_sub_shape[axis]
         )
     else:
         test_case.assertTrue(concated.shape[axis] == max_dim_size)
     return (var, concated)
Example #7
File: test_pool.py  Project: zzk0/oneflow
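 # Pooling training job: `pooling_type` and `dim` pick flow.nn.avg_pool{1,2,3}d or
 # max_pool{1,2,3}d, and assert_grad checks the gradient w.r.t. the input.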
 def pooling_job(x: tensor_def(x_shape, dtype=dtype)):
     v = flow.get_variable(
         "x",
         shape=x_shape,
         dtype=dtype,
         initializer=flow.constant_initializer(0),
         trainable=True,
     )
     v = flow.cast_to_current_logical_view(v)
     flow.watch_diff(v, assert_grad)
     x += v
     with flow.scope.placement(device_type, "0:0"):
         pooling_f = None
         if pooling_type == "AVG":
             pooling_f = getattr(flow.nn, "avg_pool{}d".format(dim))
         elif pooling_type == "MAX":
             pooling_f = getattr(flow.nn, "max_pool{}d".format(dim))
         else:
             raise ValueError("pooling_type must be AVG or MAX")
         y = pooling_f(
             x,
             ksize=ksize,
             strides=strides,
             padding=padding,
             data_format=data_format,
         )
     flow.optimizer.SGD(
         flow.optimizer.PiecewiseConstantScheduler([], [0.0001]), momentum=0
     ).minimize(y)
     return y
Example #8
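 # Dynamic concat: each (1,)-shaped variable is multiplied by a dynamic input, the products are
 # concatenated along `axis`, and per-branch gradients are checked via make_watch_diff_cb.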
 def dynamic_concat_job(
     input_0_def: oft.ListNumpy.Placeholder(
         shape=input_static_shape, dtype=flow.float
     ),
     input_1_def: oft.ListNumpy.Placeholder(
         shape=input_static_shape, dtype=flow.float
     ),
 ):
     var_0 = flow.get_variable(
         "Var0",
         shape=(1,),
         dtype=flow.float,
         initializer=flow.constant_initializer(value=1, dtype=flow.float),
         trainable=True,
     )
     var_1 = flow.get_variable(
         "Var1",
         shape=(1,),
         dtype=flow.float,
         initializer=flow.constant_initializer(value=1, dtype=flow.float),
         trainable=True,
     )
     var_0 = flow.cast_to_current_logical_view(var_0)
     var_1 = flow.cast_to_current_logical_view(var_1)
     input_0_def = flow.cast_to_current_logical_view(input_0_def)
     input_1_def = flow.cast_to_current_logical_view(input_1_def)
     if callable(watch_cb):
         flow.watch(var_0, watch_cb)
         flow.watch(var_1, watch_cb)
         flow.watch(flow.identity(input_0_def), watch_cb)
         flow.watch(flow.identity(input_1_def), watch_cb)
     var_0 = var_0 * input_0_def
     var_1 = var_1 * input_1_def
     if callable(watch_cb):
         flow.watch(var_0, watch_cb)
         flow.watch(var_1, watch_cb)
     result = flow.concat(
         [var_0, var_1], axis=axis, max_dim_size=input_static_shape[axis]
     )
     flow.optimizer.SGD(
         flow.optimizer.PiecewiseConstantScheduler([], [0.0001]), momentum=0
     ).minimize(result)
     flow.watch_diff(var_0, make_watch_diff_cb(0))
     flow.watch_diff(var_1, make_watch_diff_cb(1))
     return result
Example #9
 def DynamicConvJob(x: oft.ListNumpy.Placeholder((10, 3, 100, 100))):
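     # Dynamic conv2d training job: a scalar variable makes the dynamic input differentiable,
     # the weight shape follows data_format, and values/gradients are stashed via global_storage_setter.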
     with flow.scope.placement(device_type, "0:0"):
         x_var = flow.get_variable(
             name="v1",
             shape=(1, ),
             dtype=flow.float,
             initializer=flow.zeros_initializer(),
         )
         x_var = flow.cast_to_current_logical_view(x_var)
         x += x_var
         if data_format == "NCHW":
             weight_shape = (filters, x_shape[1] // groups, kernel_size,
                             kernel_size)
         else:
             weight_shape = (filters, kernel_size, kernel_size,
                             x_shape[3] // groups)
         weight = flow.get_variable(
             "conv-weight",
             shape=weight_shape,
             dtype=flow.float,
             initializer=flow.random_uniform_initializer(minval=0, maxval=100),
         )
         weight = flow.cast_to_current_logical_view(weight)
         loss = flow.nn.conv2d(
             x,
             weight,
             strides=[stride, stride],
             padding=of_padding,
             data_format=data_format,
             dilations=[1, 1],
             groups=groups,
         )
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [0.0001]), momentum=0
         ).minimize(loss)
         flow.watch(x, global_storage_setter("x"))
         flow.watch_diff(x, global_storage_setter("x_diff"))
         flow.watch(weight, global_storage_setter("weight"))
         flow.watch_diff(weight, global_storage_setter("weight_diff"))
         flow.watch(loss, global_storage_setter("loss"))
         flow.watch_diff(loss, global_storage_setter("loss_diff"))
         return loss
Example #10
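 # flow.gather backward check: the params blob is made differentiable through a zero variable,
 # gathered along `axis` with `batch_dims`, and compare_fn receives its gradient.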
 def do_gather(x_blob, i_blob):
     with flow.scope.placement(device_type, "0:0"):
         x = flow.get_variable(
             "params",
             shape=params.shape,
             dtype=flow.float32,
             initializer=flow.constant_initializer(0),
         )
         x = flow.cast_to_current_logical_view(x)
         x = x + x_blob
         y = flow.gather(x, i_blob, axis=axis, batch_dims=batch_dims)
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
         ).minimize(y)
     flow.watch_diff(x, compare_fn)
     return y
Example #11
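 # flow.gather_nd check: the optimizer and the watch_diff hook are only registered when
 # need_grad is set, so the same job doubles as a forward-only test.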
 def do_gather_nd(x, index):
     x_var = flow.get_variable(
         "params",
         shape=(1,),
         dtype=x_dtype,
         initializer=flow.constant_initializer(0, x_dtype),
     )
     x = x + flow.cast_to_current_logical_view(x_var)
     y = flow.gather_nd(x, index)
     if need_grad:
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
         ).minimize(y)
         if callable(comp_diff_fn):
             flow.watch_diff(x, comp_diff_fn)
     return y
Example #12
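 # flow.scatter_nd backward check: the updates blob is made differentiable, scattered into a
 # blob of the given `shape`, and compare_fn receives the gradient of the updates.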
 def do_scatter_nd(indices_blob, updates_blob):
     with flow.scope.placement(device_type, "0:0"):
         x = flow.get_variable(
             "updates",
             shape=updates.shape,
             dtype=flow.float32,
             initializer=flow.constant_initializer(0),
         )
         x = flow.cast_to_current_logical_view(x)
         x = x + updates_blob
         y = flow.scatter_nd(indices_blob, x, shape)
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
         ).minimize(y)
     flow.watch_diff(x, compare_fn)
     return y
Example #13
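 # flow.clip_by_value backward check: values are clipped to [min, max] and grad_cb receives the
 # gradient w.r.t. the pre-clip values.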
 def clip(values_blob):
     with flow.scope.placement(device_type, "0:0"):
         x = flow.get_variable(
             "values",
             shape=values.shape,
             dtype=data_type,
             initializer=flow.constant_initializer(0),
         )
         x = flow.cast_to_current_logical_view(x)
         x = x + values_blob
         y = flow.clip_by_value(x, min, max)
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
         ).minimize(y)
     flow.watch_diff(x, grad_cb)
     return y
Example #14
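 # Dynamic reshape: the input is flattened to (-1, 20), multiplied with a trainable (20, 32)
 # matrix, and reshaped to (-1, 8, 4); the first reshape result is returned for comparison.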
 def DynamicReshapeJob(x: oft.ListNumpy.Placeholder(data_shape)):
     reshape_out1 = flow.reshape(x, (-1, 20))
     my_model = flow.get_variable(
         "my_model",
         shape=(20, 32),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
         trainable=True,
     )
     my_model = flow.cast_to_current_logical_view(my_model)
     mm_out = flow.matmul(reshape_out1, my_model)
     reshape_out2 = flow.reshape(mm_out, (-1, 8, 4))
     flow.optimizer.SGD(
         flow.optimizer.PiecewiseConstantScheduler([], [0.0001]), momentum=0
     ).minimize(reshape_out2)
     return reshape_out1
Example #15
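 # flow.dim_gather on dynamic (ListNumpy) inputs: gathers along sample["dim"] with index_def and
 # checks the gradient w.r.t. the params through _compare_diff.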
 def DynamicDimGatherJob(
     params_def: oft.ListNumpy.Placeholder(datashape, dtype=flow.float32),
     index_def: oft.ListNumpy.Placeholder(datashape, dtype=flow.int32),
 ) -> oft.ListNumpy:
     x_var = flow.get_variable(
         "input",
         shape=(1,),
         dtype=flow.float32,
         initializer=flow.constant_initializer(0),
     )
     x_var = flow.cast_to_current_logical_view(x_var)
     x = x_var + params_def
     y = flow.dim_gather(x, sample["dim"], index_def)
     flow.optimizer.SGD(
         flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
     ).minimize(y)
     flow.watch_diff(x, _compare_diff)
     return y
Example #16
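 # float16 reflection_pad2d: the pad runs on a float16 cast of the input, the result is cast back
 # to float32 for the zero-learning-rate train step, and the gradient is compared in float32.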
 def op_function(x: tp.Numpy.Placeholder(input.shape, dtype=flow.float32)):
     with flow.scope.placement(device_type, "0:0"):
         x_var = flow.get_variable(
             name="input",
             shape=input.shape,
             dtype=flow.float32,
             initializer=flow.constant_initializer(0),
         )
         x_var = flow.cast_to_current_logical_view(x_var)
         input_x = x_var + x
         x_fp32 = flow.cast(input_x, flow.float32)
         x_fp16 = flow.cast(input_x, dtype=flow.float16)
         y_fp16 = flow.reflection_pad2d(x_fp16, padding)
         y_fp32 = flow.cast(y_fp16, dtype=flow.float32)
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [0]), momentum=0
         ).minimize(y_fp32)
     flow.watch_diff(x_fp32, _compare_diff)
     return y_fp32
Example #17
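 # flow.dim_gather on static (Numpy) inputs: gathers along `dim` with indices_def and checks the
 # params gradient through _compare_diff, mirroring the dynamic variant above.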
 def gather_fn(
     params_def: oft.Numpy.Placeholder(input.shape, dtype=value_type),
     indices_def: oft.Numpy.Placeholder(index.shape, dtype=index_type),
 ) -> oft.Numpy:
     with flow.scope.placement(device_type, "0:0"):
         x_var = flow.get_variable(
             "input",
             shape=input.shape,
             dtype=value_type,
             initializer=flow.constant_initializer(0),
         )
         x_var = flow.cast_to_current_logical_view(x_var)
         x = x_var + params_def
     y = flow.dim_gather(x, dim, indices_def)
     with flow.scope.placement(device_type, "0:0"):
         flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
         ).minimize(y)
     flow.watch_diff(x, _compare_diff)
     return y