def matrix_other():
    isess = tf.InteractiveSession()
    X = tf.Variable(tf.eye(3))
    W = tf.Variable(tf.random_normal(shape=(3, 3)))
    X.initializer.run()
    W.initializer.run()
    logger.info("X\n%s" % X.eval())
    logger.info("W\n%s" % W.eval())
    logger.info("tf.div(X,W)\n%s" % tf.div(X, W).eval())
    logger.info("tf.truediv(X,W)\n%s" % tf.truediv(X, W).eval())
    logger.info("tf.floordiv(X,W)\n%s" % tf.floordiv(X, W).eval())
    logger.info("tf.realdiv(X,W)\n%s" % tf.realdiv(X, W).eval())
    # logger.info("tf.truncatediv(X,W)\n%s" % tf.truncatediv(X, W).eval())
    logger.info("tf.floor_div(X,W)\n%s" % tf.floor_div(X, W).eval())
    logger.info("tf.truncatemod(X,W)\n%s" % tf.truncatemod(X, W).eval())
    logger.info("tf.floormod(X,W)\n%s" % tf.floormod(X, W).eval())
    logger.info("tf.cross(X,W)\n%s" % tf.cross(X, W).eval())
    logger.info("tf.add_n([X,W])\n%s" % tf.add_n([X, W]).eval())
    logger.info("tf.squared_difference(X,W)\n%s" % tf.squared_difference(X, W).eval())
    isess.close()
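# A minimal eager-mode (TF2) sketch of the same division/remainder comparison,
# assuming only that TensorFlow 2.x is installed; tf.div and tf.random_normal
# used above are TF1-era APIs.
import tensorflow as tf

X = tf.constant([[7.0, -7.0], [7.0, -7.0]])
W = tf.constant([[2.0, 2.0], [-2.0, -2.0]])
print(tf.math.truediv(X, W))   # [[ 3.5 -3.5] [-3.5  3.5]]
print(tf.math.floordiv(X, W))  # rounds toward -inf: [[ 3. -4.] [-4.  3.]]
print(tf.truncatemod(X, W))    # remainder takes the sign of X: [[ 1. -1.] [ 1. -1.]]
print(tf.math.floormod(X, W))  # remainder takes the sign of W: [[ 1.  1.] [-1. -1.]]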
def preprocess(self, text: str, seq_length: int = 100) -> list:
    """Split the lyrics of a song into multiple substrings.

    Preprocess a string containing lyrics, extracting substrings that have a
    fixed, specified size from it. Return the substrings as sequences of
    integers rather than characters.

    :param text: a string containing lyrics
    :param seq_length: fixed size of each output sequence
    :return: a list of substrings extracted from the song, as lists of ints
    """
    # convert the string to a list of integers:
    text_as_chars = tf.strings.bytes_split(text)
    text_as_int = tf.map_fn(fn=lambda c: self.char2idx.lookup(c),
                            elems=text_as_chars,
                            dtype=tf.int32)
    text_as_int = tf.boolean_mask(text_as_int, text_as_int > -1)
    # compute the number of characters in the text:
    text_size = tf.size(text_as_int)
    # increase the sequence length by 1, for character-level prediction:
    seq_length += 1
    # create subsequences from the original sequence:
    trail = tf.truncatemod(text_size, seq_length)
    n_seqs = tf.truncatediv(text_size, seq_length)
    to_keep = text_size - trail
    sequences = tf.reshape(text_as_int[:to_keep], [n_seqs, seq_length])
    # shuffle the substrings:
    sequences = tf.random.shuffle(sequences)
    return sequences
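# A minimal sketch of the lookup table the method assumes (self.char2idx);
# the vocabulary here is hypothetical. Unknown characters map to -1, which is
# why the tf.boolean_mask(text_as_int, text_as_int > -1) line above drops them.
import tensorflow as tf

vocab = sorted(set("hello world\n"))
char2idx = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(
        keys=tf.constant(vocab),
        values=tf.range(len(vocab), dtype=tf.int32)),
    default_value=-1)  # unknown characters -> -1, filtered out later
print(char2idx.lookup(tf.constant(["h", "e", "?"])))  # [4, 3, -1]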
def clustered_mse(y_true, y_pred):
    # k (group size) and K (keras.backend) come from the enclosing scope.
    group_by = tf.constant(k)
    real_size = tf.size(y_pred)
    remainder = tf.truncatemod(real_size, group_by)
    # remainder = K.print_tensor(remainder, message="remainder is: ")

    # drop the trailing elements that do not fill a complete group:
    y_true = y_true[0:real_size - remainder]
    y_pred = y_pred[0:real_size - remainder]
    real_size = tf.size(y_pred)
    # real_size = K.print_tensor(real_size, message="real_size is: ")

    n = real_size / group_by
    # n = K.print_tensor(n, message="n is: ")

    # build segment ids [0,0,...,0, 1,1,...,1, ...], one id per group of k:
    idx = tf.range(n)
    idx = tf.reshape(idx, [-1, 1])     # convert to an n x 1 matrix
    idx = tf.tile(idx, [1, group_by])  # repeat each id group_by times
    idx = tf.reshape(idx, [-1])        # flatten back to a vector
    idx = tf.cast(idx, tf.int32)

    # segment_ids must be the same size as dimension 0 of the input:
    y_pred_byK = tf.segment_mean(y_pred, idx)
    y_true_byK = tf.segment_mean(y_true, idx)

    return K.mean(K.square(y_pred_byK - y_true_byK), axis=-1)
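# A worked example of the grouping trick above (a sketch, with k = 3 assumed):
# consecutive groups of k predictions are averaged with segment_mean before
# the squared error is taken, so the loss compares group means, not elements.
import tensorflow as tf

k = 3
y = tf.constant([1.0, 2.0, 3.0, 10.0, 20.0, 30.0])
idx = tf.reshape(tf.tile(tf.reshape(tf.range(2), [-1, 1]), [1, k]), [-1])
print(idx)                           # [0 0 0 1 1 1]
print(tf.math.segment_mean(y, idx))  # [ 2. 20.]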
def t_to_repr(self, wave):
    """Convert audio signal to representation for neural model

    :param wave: audio signal [batches_n, samples_n, channels_n]
    :return audio representation [batches_n, blocks_n+1, freqs_n, channels_n]
        with samples_n = blocks_n * freq_n
    """
    samples_n = tf.shape(wave)[1]
    tf.assert_equal(
        tf.truncatemod(samples_n, self.freq_n), 0,
        f'Number of samples ({samples_n}) needs to be a multiple of {self.freq_n}')
    return self.mdctransformer.transform(wave)
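# If the assertion above would fail, the signal can be zero-padded up to the
# next multiple of freq_n before transforming. A minimal sketch (freq_n and
# the [batch, samples, channels] layout are taken from the docstring above):
import tensorflow as tf

def pad_to_multiple(wave, freq_n):
    samples_n = tf.shape(wave)[1]
    # amount needed to reach the next multiple; 0 if already a multiple
    pad = tf.truncatemod(freq_n - tf.truncatemod(samples_n, freq_n), freq_n)
    return tf.pad(wave, [[0, 0], [0, pad], [0, 0]])

print(pad_to_multiple(tf.ones([1, 10, 2]), 4).shape)  # (1, 12, 2)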
def extract_frequency_bins_TF(raw_input, _F_START, _F_END, _NUM_BINS, _FS):
    with tf.name_scope("utils.extract_fbins"):
        tf.assert_rank(
            raw_input, 2,
            message="Error extracting frequency bins, input tensor must be "
                    "rank 2, #elec x #samples.")
        tf.assert_rank(_F_START, 0, message="_F_START must be scalar")
        tf.assert_rank(_F_END, 0, message="_F_END must be scalar")

        # Get input length
        L = tf.cast(tf.shape(raw_input)[1], tf.float32)
        NUM_BINS = int(_NUM_BINS.eval())  # requires an active default session
        t_start = tf.cast(((L / _FS) * _F_START), tf.int32)
        t_end = tf.cast(((L / _FS) * _F_END), tf.int32)
        asserts = [
            tf.assert_equal(
                tf.truncatemod((t_end - t_start), tf.cast(_NUM_BINS, tf.int32)),
                0,
                message="Error, cannot evenly break up frequency bins")
        ]
        with tf.control_dependencies(asserts):
            raw_fft = tf.fft(tf.cast(raw_input, tf.complex64))
            print(tf.shape(raw_fft).eval())
            sliced_raw_fft = tf.slice(raw_fft, [0, t_start], [-1, t_end - t_start])
            print(tf.shape(sliced_raw_fft).eval())
            sliced_raw_fft_t = tf.split(sliced_raw_fft, NUM_BINS, axis=1)
            sliced_raw_fft_t_abs = tf.abs(sliced_raw_fft_t)
            sliced_binned_fft = tf.reduce_sum(sliced_raw_fft_t_abs, axis=2)
            sliced_binned_fft_reduced = tf.transpose(sliced_binned_fft)
            return sliced_binned_fft_reduced
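# The index arithmetic above maps a frequency f (Hz) to an FFT bin via
# f * L / _FS. A quick worked check of that mapping, with made-up numbers:
L, FS = 1024.0, 256.0        # 1024 samples at 256 Hz -> 4 s of signal
f_start, f_end = 8.0, 40.0   # band of interest, in Hz
t_start = int((L / FS) * f_start)  # bin 32
t_end = int((L / FS) * f_end)      # bin 160
assert (t_end - t_start) % 4 == 0  # 128 bins split evenly into 4 groups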
def simps(y, x=None, dx=1., axis=-1, even='avg'):
    """
    Integrate y(x) using samples along the given axis and the composite
    Simpson's rule. If x is None, spacing of dx is assumed.

    If there are an even number of samples, N, then there are an odd number
    of intervals (N-1), but Simpson's rule requires an even number of
    intervals. The parameter 'even' controls how this is handled.

    Parameters
    ----------
    y : array_like
        Array to be integrated.
    x : array_like, optional
        If given, the points at which `y` is sampled.
    dx : int, optional
        Spacing of integration points along axis of `y`. Only used when
        `x` is None. Default is 1.
        If y is (n1, ..., nN, m) and x is (n1, ..., nN, m), then integrate
        y[..., m] over x[..., m]. If x is (m,), then integrate all of y
        along the given axis.
    axis : int, optional
        Axis along which to integrate. Default is the last axis.
    even : {'avg', 'first', 'last'}, optional
        'avg' : Average two results: 1) use the first N-2 intervals with a
            trapezoidal rule on the last interval and 2) use the last N-2
            intervals with a trapezoidal rule on the first interval.
        'first' : Use Simpson's rule for the first N-2 intervals with a
            trapezoidal rule on the last interval.
        'last' : Use Simpson's rule for the last N-2 intervals with a
            trapezoidal rule on the first interval.

    Notes
    -----
    For an odd number of samples that are equally spaced the result is
    exact if the function is a polynomial of order 3 or less. If the
    samples are not equally spaced, then the result is exact only if the
    function is a polynomial of order 2 or less.
    """
    y = tf.cast(y, TFSettings.tf_float)
    nd = tf.size(tf.shape(y))
    N = tf.shape(y)[axis]
    last_dx = dx
    first_dx = dx
    if x is not None:
        x = tf.cast(x, TFSettings.tf_float)

        def _yes0(x=x):
            # a 1-D x of shape (m,) is reshaped to (m, 1, ..., 1) and its
            # axis moved into position so it broadcasts against y:
            shapex = tf.concat([
                tf.reshape(tf.shape(x)[0], (1,)),
                tf.ones((nd - 1,), dtype=nd.dtype)
            ], axis=0)
            x = tf.reshape(x, shapex)
            x = swap_axes(x, axis)
            return x

        def _no0(x=x):
            return x

        x = tf.cond(tf.equal(tf.size(tf.shape(x)), 1), _yes0, _no0)

    assert even in ['avg', 'last', 'first']

    def _even():
        # copy the outer spacings so the conditional assignments below do
        # not leave them unbound when x is None:
        ldx = last_dx
        fdx = first_dx
        val = 0.0
        result = 0.0
        # Simpson's rule on the first N-2 intervals, trapezoid on the last:
        if even in ['avg', 'first']:
            if x is not None:
                ldx = _get(x, axis, -1) - _get(x, axis, -2)
            val += 0.5 * ldx * (_get(y, axis, -1) + _get(y, axis, -2))
            result = _basic_simps(y, 0, N - 3, x, dx, axis)
        # Simpson's rule on the last N-2 intervals, trapezoid on the first:
        if even in ['avg', 'last']:
            if x is not None:
                fdx = _get(x, axis, 1) - _get(x, axis, 0)
            val += 0.5 * fdx * (_get(y, axis, 1) + _get(y, axis, 0))
            result += _basic_simps(y, 1, N - 2, x, dx, axis)
        if even == 'avg':
            val /= 2.0
            result /= 2.0
        return result + val

    # an even sample count N means an odd interval count, handled by _even:
    return tf.cond(tf.equal(tf.truncatemod(N, 2), 0), _even,
                   lambda: _basic_simps(y, 0, N - 2, x, dx, axis))
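# A usage sketch, assuming the helpers this function relies on (TFSettings,
# _get, _basic_simps, swap_axes) are defined alongside it: integrating sin
# over [0, pi] with 101 equally spaced samples should give approximately 2,
# matching scipy.integrate.simps.
import numpy as np
xs = np.linspace(0.0, np.pi, 101)
approx = simps(np.sin(xs), x=xs)  # ~2.0 (exact value of the integral is 2)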
# Trigonometric and inverse trigonometric functions
tf.cos(x, name=None)
tf.sin(x, name=None)
tf.tan(x, name=None)
tf.acos(x, name=None)
tf.asin(x, name=None)
tf.atan(x, name=None)

# Others
tf.div(x, y, name=None)       # Python 2.7 division: x/y -> int, or x/float(y) -> float
tf.truediv(x, y, name=None)   # Python 3 division: x/y -> float
tf.floordiv(x, y, name=None)  # Python 3 floor division: x//y -> int
tf.realdiv(x, y, name=None)   # returns a tensor with the same type as x
tf.truncatediv(x, y, name=None)
tf.floor_div(x, y, name=None)
tf.truncatemod(x, y, name=None)
tf.floormod(x, y, name=None)
tf.cross(x, y, name=None)
tf.add_n(inputs, name=None)   # inputs: a list of Tensor objects, each with the same shape and type
tf.squared_difference(x, y, name=None)  # element-wise squared difference

# Matrix math functions
# Matrix multiplication (tensors of rank >= 2)
tf.matmul(a, b, transpose_a=False, transpose_b=False,
          adjoint_a=False, adjoint_b=False,
          a_is_sparse=False, b_is_sparse=False, name=None)
# Transpose; axes can be permuted by specifying e.g. perm=[1, 0]
tf.transpose(a, perm=None, name='transpose')
# Transpose the last two dimensions of tensor a
tf.matrix_transpose(a, name='matrix_transpose')
# Matrix with two batch dimensions: if x.shape is [1, 2, 3, 4],
# then tf.matrix_transpose(x) has shape [1, 2, 4, 3]
# Matrix trace
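# A quick eager-mode check of a few entries from the list above (a sketch):
import tensorflow as tf
a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([[5.0, 6.0], [7.0, 8.0]])
print(tf.matmul(a, b))                    # [[19. 22.] [43. 50.]]
print(tf.transpose(a, perm=[1, 0]))       # [[1. 3.] [2. 4.]]
print(tf.math.squared_difference(a, b))   # [[16. 16.] [16. 16.]]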
def cond_train_accuracy():
    return tf.logical_and(
        tf.greater(global_step, 0),
        tf.equal(
            tf.truncatemod(global_step, constants.TRAIN_ACCURACY_FREQUENCY),
            0))
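# A sketch of how such a step-gating predicate is typically consumed with
# tf.cond; compute_train_accuracy is a hypothetical helper, and global_step /
# constants.TRAIN_ACCURACY_FREQUENCY are assumed from the surrounding code.
maybe_accuracy = tf.cond(
    cond_train_accuracy(),
    lambda: compute_train_accuracy(),   # hypothetical: full accuracy pass
    lambda: tf.constant(float('nan')))  # skip on non-logging steps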
y = tf.constant([[3, 3, 3]], tf.float64)
z = tf.mod(x, y)
sess = tf.Session()
print(sess.run(z))
sess.close()
# z ==> [[2.1 1.1 1.9] [2.1 2.2 2.3]]

"""tf.truncatemod(x, y, name=None)
Purpose: element-wise remainder of truncated division.
Input: x and y are tensors of the same shape, with dtype `float32`, `float64`, `int32`, or `int64`."""
x = tf.constant([[2.1, 4.1, -1.1]], tf.float64)
y = tf.constant([[3, 3, 3]], tf.float64)
z = tf.truncatemod(x, y)
sess = tf.Session()
print(sess.run(z))
sess.close()
# z ==> [[2.1 1.1 -1.1]]

"""tf.floormod(x, y, name=None)
Purpose: element-wise remainder of floor division.
Input: x and y are tensors of the same shape, with dtype `float32`, `float64`, `int32`, or `int64`."""
x = tf.constant([[2.1, 4.1, -1.1]], tf.float64)
y = tf.constant([[3, 3, 3]], tf.float64)
z = tf.floormod(x, y)
sess = tf.Session()
print(sess.run(z))
sess.close()
# z ==> [[2.1 1.1 1.9]]
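# The same comparison in TF2 eager mode (a sketch): tf.math.floormod backs the
# Python % operator on tensors, while tf.truncatemod keeps the sign of x.
import tensorflow as tf
x = tf.constant([2.1, 4.1, -1.1], tf.float64)
y = tf.constant([3.0, 3.0, 3.0], tf.float64)
print(tf.truncatemod(x, y).numpy())  # [ 2.1  1.1 -1.1]
print((x % y).numpy())               # [ 2.1  1.1  1.9]  (floormod)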
from itertools import product

import numpy as np
import tensorflow as tf
import torch as th

m = 3
n = 3
# all 2^(m*n) binary matrices of shape m x n:
x = [[list(i[j:j + m]) for j in range(0, len(i), m)]
     for i in product("01", repeat=m * n)]

# PyTorch: replace the central element with the parity of the matrix sum
kernel = th.tensor(np.array(x[199]).astype(int))
central = th.remainder(th.sum(kernel), 2)
kernel[1][1] = central

# parity of every candidate kernel:
for i in range(len(x)):
    print(np.mod(np.array(x[i]).astype(int).sum(), 2))

# TensorFlow tensors are immutable, so writing through kernel.numpy() is not
# reliable; use tensor_scatter_nd_update to set the central element instead.
kernel = tf.convert_to_tensor(np.array(x[199]).astype(int))
central = tf.truncatemod(tf.reduce_sum(kernel), 2)
kernel = tf.tensor_scatter_nd_update(kernel, [[1, 1]], [central])
def apply_gradients(self, grads_and_vars, global_step, name=None):
    """See base class."""
    assignments = []
    for (grad, param) in grads_and_vars:
        if grad is None or param is None:
            continue

        param_name = self._get_variable_name(param.name)

        m = tf.get_variable(
            name=param_name + "/adam_m",
            shape=param.shape.as_list(),
            dtype=tf.float32,
            trainable=False,
            initializer=tf.zeros_initializer())
        v = tf.get_variable(
            name=param_name + "/adam_v",
            shape=param.shape.as_list(),
            dtype=tf.float32,
            trainable=False,
            initializer=tf.zeros_initializer())
        g = tf.get_variable(
            name=param_name + "/acumu_gradient",
            shape=param.shape.as_list(),
            dtype=tf.float32,
            trainable=False,
            initializer=tf.zeros_initializer())

        if self.update_freq == 1:
            # Standard Adam update.
            next_m = (
                tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
            next_v = (
                tf.multiply(self.beta_2, v) +
                tf.multiply(1.0 - self.beta_2, tf.square(grad)))
            update = next_m / (tf.sqrt(next_v) + self.epsilon)

            # Just adding the square of the weights to the loss function is *not*
            # the correct way of using L2 regularization/weight decay with Adam,
            # since that will interact with the m and v parameters in strange ways.
            #
            # Instead we want to decay the weights in a manner that doesn't interact
            # with the m/v parameters. This is equivalent to adding the square
            # of the weights to the loss with plain (non-momentum) SGD.
            if self._do_use_weight_decay(param_name):
                update += self.weight_decay_rate * param

            update_with_lr = self.learning_rate * update
            next_param = param - update_with_lr

            assignments.extend(
                [param.assign(next_param), m.assign(next_m), v.assign(next_v)])
        else:
            # Accumulate gradients and apply Adam only every update_freq steps.
            update_cond = tf.reduce_any(tf.equal(
                tf.truncatemod(global_step, self.update_freq), 0))
            update_cond_g = tf.reduce_any(tf.equal(
                tf.truncatemod(global_step, self.update_freq), 1))
            # restart the running average right after an update step:
            next_g = tf.cond(update_cond_g,
                             lambda: (1 / float(self.update_freq)) * grad,
                             lambda: (1 / float(self.update_freq)) * grad + g)
            next_m = tf.cond(update_cond,
                             lambda: (tf.multiply(self.beta_1, m) +
                                      tf.multiply(1.0 - self.beta_1, next_g)),
                             lambda: m)
            next_v = tf.cond(update_cond,
                             lambda: (tf.multiply(self.beta_2, v) +
                                      tf.multiply(1.0 - self.beta_2,
                                                  tf.square(next_g))),
                             lambda: v)
            update = next_m / (tf.sqrt(next_v) + self.epsilon)
            if self._do_use_weight_decay(param_name):
                update += self.weight_decay_rate * param
            update_with_lr = self.learning_rate * update
            next_param = tf.cond(update_cond,
                                 lambda: param - update_with_lr,
                                 lambda: param)
            assignments.extend(
                [param.assign(next_param), m.assign(next_m),
                 v.assign(next_v), g.assign(next_g)])
    return tf.group(*assignments, name=name)
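# A toy trace of the accumulation schedule above (a sketch; update_freq = 4
# assumed): the averaged gradient is restarted at steps where
# step % update_freq == 1 and the Adam update runs where step % update_freq == 0.
update_freq = 4
for step in range(1, 9):
    restarts = step % update_freq == 1  # update_cond_g: reset running average
    applies = step % update_freq == 0   # update_cond: run the Adam update
    print(step, restarts, applies)
# steps 1 and 5 restart the accumulator; steps 4 and 8 apply an update.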