def diag_job(
    input_tensor: tp.Numpy.Placeholder(shape=input_shape, dtype=flow.float),
) -> tp.Numpy:
    # Add a trainable zero-initialized variable so the job has a gradient path.
    input_var = flow.get_variable(
        "input_tensor",
        shape=input_shape,
        dtype=flow.float,
        initializer=flow.zeros_initializer(),
        trainable=True,
    )
    input_tensor = input_tensor + input_var
    input_tensor = flow.cast_to_current_logical_view(input_tensor)
    input_tensor = flow.cast(input_tensor, type_name_to_flow_type[dtype])

    output = flow.diag(input_tensor, dim)
    # Integer outputs cannot be back-propagated, so cast them to float before training.
    if output.dtype in (flow.int8, flow.int32, flow.int64):
        output = flow.cast(output, flow.float)

    flow.optimizer.Adam(
        flow.optimizer.PiecewiseConstantScheduler([], [1e-4])
    ).minimize(output)

    # Stash forward values and gradients for the comparison done by the caller.
    flow.watch(input_tensor, test_global_storage.Setter("x"))
    flow.watch_diff(input_tensor, test_global_storage.Setter("x_diff"))
    flow.watch(output, test_global_storage.Setter("output"))
    flow.watch_diff(output, test_global_storage.Setter("output_diff"))

    return output
def diag_job(
    input: tp.Numpy.Placeholder(shape=input_shape, dtype=flow.float),
) -> tp.Numpy:
    with flow.scope.placement(device_type, "0:0"):
        # Trainable zero-initialized variable provides a differentiable path for the test.
        input_var = flow.get_variable(
            "input",
            shape=input_shape,
            dtype=flow.float,
            initializer=flow.zeros_initializer(),
            trainable=True,
        )
        input = input + input_var

    # Check the gradient flowing back into the (off-)diagonal positions.
    flow.watch_diff(input, assert_diag_grad)

    output = flow.diag(input, diagonal)
    # Integer outputs cannot be back-propagated, so cast them to float before training.
    if output.dtype in (flow.int8, flow.int32, flow.int64):
        output = flow.cast(output, flow.float)

    with flow.scope.placement(device_type, "0:0"):
        flow.optimizer.Adam(
            flow.optimizer.PiecewiseConstantScheduler([], [1e-4])
        ).minimize(output)

    return output
# Tensor-method wrapper so that `x.diag(diagonal)` dispatches to flow.diag.
def _diag(self, diagonal=0):
    return flow.diag(self, diagonal=diagonal)
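# A minimal standalone sketch (not part of the test jobs above), assuming the same
# legacy lazy-mode API these tests target (flow.global_function + tp.Numpy.Placeholder).
# The job name `diag_forward_job` and the 3x3 input are illustrative only; it runs
# flow.diag forward and checks the result against numpy.diag.
#
# import numpy as np
# import oneflow as flow
# import oneflow.typing as tp
#
#
# @flow.global_function(type="predict")
# def diag_forward_job(
#     x: tp.Numpy.Placeholder(shape=(3, 3), dtype=flow.float),
# ) -> tp.Numpy:
#     # Extract the main diagonal; a second argument would select an off-diagonal.
#     return flow.diag(x)
#
#
# x = np.arange(9, dtype=np.float32).reshape(3, 3)
# out = diag_forward_job(x)
# assert np.allclose(out, np.diag(x))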