def Rtt_default_variable_creator(next_creator=None, **kwargs):
    """Default variable creator."""
    assert next_creator is None
    initial_value = kwargs.get("initial_value", None)
    trainable = kwargs.get("trainable", None)
    collections = kwargs.get("collections", None)
    validate_shape = kwargs.get("validate_shape", True)
    caching_device = kwargs.get("caching_device", None)
    name = kwargs.get("name", None)
    variable_def = kwargs.get("variable_def", None)
    dtype = kwargs.get("dtype", None)
    expected_shape = kwargs.get("expected_shape", None)
    import_scope = kwargs.get("import_scope", None)
    constraint = kwargs.get("constraint", None)
    use_resource = kwargs.get("use_resource", None)
    synchronization = kwargs.get("synchronization", None)
    aggregation = kwargs.get("aggregation", None)
    shape = kwargs.get("shape", None)

    initial_value = convert_init_value_to_string(initial_value, dtype)

    if use_resource is None:
        use_resource = variable_scope.get_variable_scope().use_resource
    if use_resource is None:
        use_resource = variable_scope._DEFAULT_USE_RESOURCE
    use_resource = use_resource or context.executing_eagerly()
    if use_resource:
        distribute_strategy = kwargs.get("distribute_strategy", None)
        return rtt_ts.convert_to_rtttensor(
            resource_variable_ops.ResourceVariable(
                initial_value=initial_value,
                trainable=trainable,
                collections=collections,
                validate_shape=validate_shape,
                caching_device=caching_device,
                name=name,
                dtype=dtype,
                constraint=constraint,
                variable_def=variable_def,
                import_scope=import_scope,
                distribute_strategy=distribute_strategy,
                synchronization=synchronization,
                aggregation=aggregation,
                shape=shape))
    else:
        return rtt_ts.convert_to_rtttensor(
            variables.RefVariable(
                initial_value=initial_value,
                trainable=trainable,
                collections=collections,
                validate_shape=validate_shape,
                caching_device=caching_device,
                name=name,
                dtype=dtype,
                constraint=constraint,
                variable_def=variable_def,
                expected_shape=expected_shape,
                import_scope=import_scope,
                synchronization=synchronization,
                aggregation=aggregation,
                shape=shape))

def rtt_fused_batch_norm(x,
                         scale,
                         offset,
                         mean,
                         variance,
                         epsilon=0.0001,
                         data_format="NHWC",
                         is_training=True,
                         name=None):
    """Fused batch normalization; returns the normalized tensor together with
    the batch mean/variance and the two reserve-space outputs."""
    x = rtt_ts.convert_to_rtttensor(x)
    scale = rtt_ts.convert_to_rtttensor(scale)
    offset = rtt_ts.convert_to_rtttensor(offset)
    mean = rtt_ts.convert_to_rtttensor(mean)
    variance = rtt_ts.convert_to_rtttensor(variance)
    y, batch_mean, batch_var, reserve_space_1, reserve_space_2 = \
        rtt_ts.rtt_ops.rtt_fused_batch_norm(x,
                                            scale,
                                            offset,
                                            mean,
                                            variance,
                                            epsilon=epsilon,
                                            data_format=data_format,
                                            is_training=is_training,
                                            name=name)
    return (rtt_ts.RttTensor(y), rtt_ts.RttTensor(batch_mean),
            rtt_ts.RttTensor(batch_var), reserve_space_1, reserve_space_2)

def rtt_bias_add(value, bias, data_format="NHWC", name=None):
    """Adds `bias` to `value`."""
    value = rtt_ts.convert_to_rtttensor(value)
    bias = rtt_ts.convert_to_rtttensor(bias)
    _result = rtt_ts.rtt_ops.rtt_bias_add(value._raw,
                                          bias._raw,
                                          data_format=data_format,
                                          name=name)
    return rtt_ts.RttTensor(_result)

def rtt_matmul(x, y, transpose_a=False, transpose_b=False, name=None):
    """Multiplies matrix `x` by matrix `y`, producing `x` * `y`."""
    x = rtt_ts.convert_to_rtttensor(x)
    y = rtt_ts.convert_to_rtttensor(y)
    _result = rtt_ts.rtt_ops.rtt_matmul(x._raw,
                                        y._raw,
                                        transpose_a=transpose_a,
                                        transpose_b=transpose_b,
                                        name=name)
    return rtt_ts.RttTensor(_result)

def Rtt_default_variable_creator_v2(next_creator=None, **kwargs):
    """Default variable creator."""
    assert next_creator is None
    initial_value = kwargs.get("initial_value", None)
    trainable = kwargs.get("trainable", None)
    validate_shape = kwargs.get("validate_shape", True)
    caching_device = kwargs.get("caching_device", None)
    name = kwargs.get("name", None)
    variable_def = kwargs.get("variable_def", None)
    dtype = kwargs.get("dtype", None)
    import_scope = kwargs.get("import_scope", None)
    constraint = kwargs.get("constraint", None)
    distribute_strategy = kwargs.get("distribute_strategy", None)
    synchronization = kwargs.get("synchronization", None)
    aggregation = kwargs.get("aggregation", None)
    shape = kwargs.get("shape", None)

    initial_value = convert_init_value_to_string(initial_value, dtype)

    return rtt_ts.convert_to_rtttensor(
        resource_variable_ops.ResourceVariable(
            initial_value=initial_value,
            trainable=trainable,
            validate_shape=validate_shape,
            caching_device=caching_device,
            name=name,
            dtype=dtype,
            constraint=constraint,
            variable_def=variable_def,
            import_scope=import_scope,
            distribute_strategy=distribute_strategy,
            synchronization=synchronization,
            aggregation=aggregation,
            shape=shape))

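# A minimal sketch of how the two creators above might be installed, assuming
# this module patches TensorFlow's default creator hooks the same way the
# other rtt_* wrappers replace their tf counterparts (the exact hook point is
# an assumption, not something this section confirms):
#
#   from tensorflow.python.ops import variable_scope
#
#   variable_scope.default_variable_creator = Rtt_default_variable_creator
#   variable_scope.default_variable_creator_v2 = Rtt_default_variable_creator_v2
#
# Once installed, plain `tf.Variable(...)` calls are routed through the Rtt
# creators and come back wrapped as RttTensor values.
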
def _softmax(logits, compute_op, dim=-1, name=None):
    """Helper for softmax-family ops: applies `compute_op` along `dim`."""
    logits = rtt_ts.convert_to_rtttensor(logits)

    def _swap_axis(logits, dim_index, last_index, name=None):
        """Swaps logits's dim_index and last_index."""
        return array_ops.transpose(logits,
                                   array_ops.concat([
                                       math_ops.range(dim_index), [last_index],
                                       math_ops.range(dim_index + 1, last_index),
                                       [dim_index]
                                   ], 0),
                                   name=name)

    # We need its original shape for shape inference.
    shape = logits._raw.get_shape()
    is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
    if is_last_dim:
        _result = compute_op(logits, name=name)
        return rtt_ts.RttTensor(_result)

    dim_val = dim
    if isinstance(dim, ops.Tensor):
        dim_val = tensor_util.constant_value(dim)
    elif isinstance(dim, rtt_ts.RttTensor):
        dim_val = tensor_util.constant_value(dim._raw)
    if dim_val is not None and not -shape.ndims <= dim_val < shape.ndims:
        raise errors_impl.InvalidArgumentError(
            None, None,
            "Dimension (%d) must be in the range [%d, %d) where %d is the"
            " number of dimensions in the input." %
            (dim_val, -shape.ndims, shape.ndims, shape.ndims))

    # In case dim is negative (and is not the last dimension -1), add shape.ndims.
    ndims = array_ops.rank(logits._raw)
    if not isinstance(dim, ops.Tensor):
        if dim < 0:
            dim += ndims
    else:
        dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)

    # Swap logits' dimension of dim and its last dimension.
    input_rank = array_ops.rank(logits._raw)
    dim_axis = dim % shape.ndims
    logits = _swap_axis(logits._raw, dim_axis, math_ops.subtract(input_rank, 1))

    # Do the actual softmax on its last dimension.
    _result = compute_op(logits, name=name)

    # If dim is not the last dimension, we have to do a transpose so that we
    # can still perform the op on its last dimension.
    _result = _swap_axis(_result,
                         dim_axis,
                         math_ops.subtract(input_rank, 1),
                         name=name)

    # Make shape inference work since transpose may erase its static shape.
    _result.set_shape(shape)

    return rtt_ts.RttTensor(_result)

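# A minimal sketch of the public wrappers one would build on `_softmax`,
# assuming the secure kernels are exposed as `rtt_ts.rtt_ops.rtt_softmax` and
# `rtt_ts.rtt_ops.rtt_log_softmax` (the kernel names are assumptions; only the
# `_softmax` helper itself appears in this section):
def rtt_softmax(logits, axis=None, name=None):
    """Computes softmax activations along `axis` via the secure kernel."""
    if axis is None:
        axis = -1
    return _softmax(logits, rtt_ts.rtt_ops.rtt_softmax, axis, name)


def rtt_log_softmax(logits, axis=None, name=None):
    """Computes log-softmax activations along `axis` via the secure kernel."""
    if axis is None:
        axis = -1
    return _softmax(logits, rtt_ts.rtt_ops.rtt_log_softmax, axis, name)
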
def RttAssignSub(ref, value, use_locking=None, name=None):
    """Update `ref` by subtracting `value` from it."""
    value = rtt_ts.convert_to_rtttensor(value)
    ref = _get_rtt_var(ref)
    if ref.dtype._is_ref_dtype:
        return rtt_ts.rtt_ops.rtt_assign_sub(ref,
                                             value,
                                             use_locking=use_locking,
                                             name=name)
    return ref.assign_sub(value)

def rtt_arg_max(input, dimension=None, name=None, output_type=dtypes.string):
    """Returns the index with the largest value across `dimension` of `input`."""
    if dimension is None:
        dimension = 0
    input = rtt_ts.convert_to_rtttensor(input)
    _result = rtt_ts.rtt_ops.rtt_arg_max(input,
                                         dimension=dimension,
                                         name=name,
                                         output_type=output_type)
    return rtt_ts.RttTensor(_result)

def RttAssign(ref, value, validate_shape=None, use_locking=None, name=None):
    """Update `ref` by assigning `value` to it."""
    value = rtt_ts.convert_to_rtttensor(value)
    ref = _get_rtt_var(ref)
    if ref.dtype._is_ref_dtype:
        return gen_state_ops.assign(ref,
                                    value._raw,
                                    use_locking=use_locking,
                                    name=name,
                                    validate_shape=validate_shape)
    return ref.assign(value._raw, name=name)

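# A minimal usage sketch for the assign wrappers; the variable and update
# values are illustrative only:
#
#   w = tf.Variable([[1.0, 2.0]], name="w")   # created via the Rtt creators
#   dec = RttAssignSub(w, [[0.5, 0.5]])       # graph op computing w -= value
#   init = RttAssign(w, [[0.0, 0.0]])         # graph op computing w = value
#
# Both return ops/tensors to be run inside a session, mirroring the semantics
# of tf.assign / tf.assign_sub.
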
def rtt_conv2d(input,
               filter,
               strides=None,
               padding=None,
               use_cudnn_on_gpu=False,
               explicit_paddings=[],
               data_format="NHWC",
               dilations=[1, 1, 1, 1],
               name=None):
    """Computes a 2-D convolution given `input` and `filter` tensors."""
    input = rtt_ts.convert_to_rtttensor(input)
    filter = rtt_ts.convert_to_rtttensor(filter)
    _result = rtt_ts.rtt_ops.rtt_conv2d(input._raw,
                                        filter._raw,
                                        strides=strides,
                                        padding=padding,
                                        use_cudnn_on_gpu=use_cudnn_on_gpu,
                                        explicit_paddings=explicit_paddings,
                                        data_format=data_format,
                                        dilations=dilations,
                                        name=name)
    return rtt_ts.RttTensor(_result)

def rtt_max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
    """Performs max pooling on `value`."""
    value = rtt_ts.convert_to_rtttensor(value)
    _result = rtt_ts.rtt_ops.rtt_max_pool(value,
                                          ksize=ksize,
                                          strides=strides,
                                          padding=padding,
                                          data_format=data_format,
                                          name=name)
    return rtt_ts.RttTensor(_result)

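# A minimal usage sketch for the convolution and pooling wrappers; the shapes
# and hyper-parameters are illustrative only:
#
#   images = ...   # NHWC tensor, e.g. shape [batch, 28, 28, 1]
#   kernel = ...   # HWIO filter, e.g. shape [3, 3, 1, 16]
#   conv = rtt_conv2d(images, kernel, strides=[1, 1, 1, 1], padding="SAME")
#   pool = rtt_max_pool(conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
#                       padding="VALID")
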
def rtt_mean(input_tensor,
             axis=None,
             keepdims=None,
             name=None,
             reduction_indices=None,
             keep_dims=None):
    """Computes the mean of elements across dimensions of a tensor."""
    axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                  "reduction_indices",
                                                  reduction_indices)
    keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                      "keep_dims", keep_dims)
    keepdims = False if keepdims is None else keepdims
    axis = math_ops._ReductionDims(input_tensor, axis)
    input_tensor = rtt_ts.convert_to_rtttensor(input_tensor)
    _result = rtt_ts.rtt_ops.rtt_reduce_mean(input_tensor,
                                             reduction_indices=axis,
                                             name=name,
                                             keep_dims=keepdims)
    return rtt_ts.RttTensor(_result)

def rtt_sum(input_tensor,
            axis=None,
            keepdims=None,
            name=None,
            reduction_indices=None,
            keep_dims=None):
    """Computes the sum of elements across dimensions of a tensor."""
    axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                  "reduction_indices",
                                                  reduction_indices)
    keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                      "keep_dims", keep_dims)
    keepdims = False if keepdims is None else keepdims
    axis = math_ops._ReductionDims(input_tensor, axis)
    input_tensor = rtt_ts.convert_to_rtttensor(input_tensor)
    _result = rtt_ts.rtt_ops.rtt_reduce_sum(input_tensor,
                                            reduction_indices=axis,
                                            name=name,
                                            keep_dims=keepdims)
    return rtt_ts.RttTensor(_result)

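# A minimal usage sketch for the reduction wrappers; the input values are
# illustrative, and in a real multi-party run the results stay secret-shared
# until explicitly revealed:
#
#   x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
#   total = rtt_sum(x)                            # logically 10.0
#   col_means = rtt_mean(x, axis=0)               # logically [2.0, 3.0]
#   row_sums = rtt_sum(x, axis=1, keepdims=True)  # shape (2, 1)
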
def rtt_square(x, name=None):
    """Computes square of x element-wise."""
    x = rtt_ts.convert_to_rtttensor(x)
    _result = rtt_ts.rtt_ops.rtt_square(x._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_sigmoid(x, name=None):
    """Computes sigmoid of `x` element-wise.

    Specifically, `y = 1 / (1 + exp(-x))`.
    """
    x = rtt_ts.convert_to_rtttensor(x)
    _result = rtt_ts.rtt_ops.rtt_sigmoid(x._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_notequal(x, y, name=None):
    """Returns the truth value of (x != y) element-wise."""
    x = rtt_ts.convert_to_rtttensor(x)
    y = rtt_ts.convert_to_rtttensor(y)
    _result = rtt_ts.rtt_ops.rtt_not_equal(x._raw, y._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_realdiv(x, y, name=None):
    """Returns x / y element-wise for real types."""
    x = rtt_ts.convert_to_rtttensor(x)
    y = rtt_ts.convert_to_rtttensor(y)
    _result = rtt_ts.rtt_ops.rtt_realdiv(x._raw, y._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_truediv(x, y, name=None):
    """Divides x / y element-wise (using Python 3 division operator semantics)."""
    x = rtt_ts.convert_to_rtttensor(x)
    y = rtt_ts.convert_to_rtttensor(y)
    _result = rtt_ts.rtt_ops.rtt_truediv(x._raw, y._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_floordiv(x, y, name=None):
    """Divides `x / y` element-wise, rounding toward the most negative integer."""
    x = rtt_ts.convert_to_rtttensor(x)
    y = rtt_ts.convert_to_rtttensor(y)
    _result = rtt_ts.rtt_ops.rtt_floordiv(x._raw, y._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_mul(x, y, name=None):
    """Returns x * y element-wise."""
    x = rtt_ts.convert_to_rtttensor(x)
    y = rtt_ts.convert_to_rtttensor(y)
    _result = rtt_ts.rtt_ops.rtt_mul(x._raw, y._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_relu(x, name=None):
    """Computes rectified linear: `max(x, 0)`."""
    x = rtt_ts.convert_to_rtttensor(x)
    _result = rtt_ts.rtt_ops.rtt_relu(x._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_abs(x, name=None):
    """Computes the absolute value of a tensor."""
    x = rtt_ts.convert_to_rtttensor(x)
    _result = rtt_ts.rtt_ops.rtt_abs(x._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_neg(x, name=None):
    """Computes numerical negative value element-wise."""
    x = rtt_ts.convert_to_rtttensor(x)
    _result = rtt_ts.rtt_ops.rtt_negative(x._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_log1p(x, name=None):
    """Computes natural logarithm of (1 + x) element-wise."""
    x = rtt_ts.convert_to_rtttensor(x)
    _result = rtt_ts.rtt_ops.rtt_log1p(x._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_pow(x, y, name=None):
    """Computes the power of one value to another."""
    x = rtt_ts.convert_to_rtttensor(x)
    y = rtt_ts.convert_to_rtttensor(y)
    _result = rtt_ts.rtt_ops.rtt_pow(x._raw, y._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_l2_loss(x, name=None):
    """Computes half the L2 norm of `x` without the sqrt: `sum(x ** 2) / 2`."""
    x = rtt_ts.convert_to_rtttensor(x)
    _result = rtt_ts.rtt_ops.rtt_l2_loss(x._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_logical_and(x, y, name=None):
    """Returns the truth value of (x & y) element-wise."""
    x = rtt_ts.convert_to_rtttensor(x)
    y = rtt_ts.convert_to_rtttensor(y)
    _result = rtt_ts.rtt_ops.rtt_logical_and(x._raw, y._raw, name=name)
    return rtt_ts.RttTensor(_result)

def rtt_logical_not(x, name=None):
    """Returns the truth value of (!x) element-wise."""
    x = rtt_ts.convert_to_rtttensor(x)
    _result = rtt_ts.rtt_ops.rtt_logical_not(x._raw, name=name)
    return rtt_ts.RttTensor(_result)

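# A minimal sketch chaining several of the element-wise wrappers above; the
# inputs are illustrative plaintext stand-ins for secret-shared values:
#
#   a = tf.constant([[-1.0, 2.0]])
#   b = tf.constant([[3.0, 4.0]])
#   y = rtt_mul(rtt_relu(a), b)                   # logically [[0.0, 8.0]]
#   same = rtt_logical_not(rtt_notequal(a, b))    # element-wise (a == b)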