def test_mathops_ten():
    mathops_10 = MathOpsTen(seed=19)
    in_node_1 = mathops_10.get_placeholder("input_1")
    # in_node_2 = mathops_8.get_placeholder("input_2")
    n0 = tf.is_finite(in_node_1)
    n1 = tf.reduce_all(n0)
    n2 = tf.cast(n0, dtype=tf.float64)
    n3 = tf.cast(n1, dtype=tf.float64)
    n4 = tf.add(n2, n3)
    n5 = tf.cast(tf.truncatediv(tf.cast(n4, dtype=tf.int32), 3), dtype=tf.float64)
    n6 = tf.reciprocal(n5)  # should be inf now
    n7 = tf.cast(tf.is_inf(n6), dtype=tf.float64)
    n8 = tf.cast(tf.is_nan(n6), dtype=tf.float64)
    n9 = tf.squared_difference(n7, n8)
    w = tf.Variable(tf.random_normal([4, 3], dtype=tf.float64), name="w")
    n10 = tf.reverse(w, axis=[-1])
    n11 = tf.add(n10, n9)
    n12 = tf.reciprocal(tf.multiply(n11, [[0, 1, 1], [1, 1, 1], [0, 1, 0], [1, 0, 0]]))
    # n13 = tf.reduce_any(tf.is_inf(n12))
    # n14 = tf.cast(n13, dtype=tf.float64)
    out_node = tf.identity(n12, name="output")

    placeholders = [in_node_1]
    predictions = [out_node]

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="g_10")
    predictions_after_freeze = tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(mathops_10.get_test_data()) \
        .build_save_frozen_graph()
    print(predictions_after_freeze[0].shape)
def preprocess(self, text: str, seq_length: int = 100) -> list:
    """Split the lyrics of a song into multiple substrings.

    Preprocess a string containing lyrics, extracting substrings that have
    a fixed, specified size from it. Return the substrings as sequences of
    integers rather than characters.

    :param text: a string containing lyrics
    :param seq_length: fixed size of each output sequence
    :return: a list of substrings extracted from the song, as lists of ints
    """
    # convert the string to a list of integers:
    text_as_chars = tf.strings.bytes_split(text)
    text_as_int = tf.map_fn(fn=lambda c: self.char2idx.lookup(c),
                            elems=text_as_chars, dtype=tf.int32)
    text_as_int = tf.boolean_mask(text_as_int, text_as_int > -1)
    # compute the number of characters in the text:
    text_size = tf.size(text_as_int)
    # increase the sequence length by 1, for character-level prediction:
    seq_length += 1
    # create subsequences from the original sequence:
    trail = tf.truncatemod(text_size, seq_length)
    n_seqs = tf.truncatediv(text_size, seq_length)
    to_keep = text_size - trail
    sequences = tf.reshape(text_as_int[:to_keep], [n_seqs, seq_length])
    # shuffle the substrings:
    sequences = tf.random.shuffle(sequences)
    return sequences
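# A minimal sketch (hypothetical numbers, not part of the original code) of
# the trimming arithmetic above: with 25 encoded characters and seq_length 7
# (after the +1), truncatemod gives the trailing remainder to drop and
# truncatediv the number of complete sequences that remain.
text_size = tf.constant(25)
seq_length = 7
trail = tf.truncatemod(text_size, seq_length)   # 25 % 7 = 4
n_seqs = tf.truncatediv(text_size, seq_length)  # 25 // 7 = 3
to_keep = text_size - trail                     # 3 * 7 = 21 characters kept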
def _body(jl, ju, value, array):
    jm = tf.truncatediv(ju + jl, 2)  # compute a midpoint
    # jm = tf.Print(jm, [jl, jm, ju])
    value_ = tf.gather(array, jm)
    # invariant: array[jl] <= value < array[ju]
    # value_ = tf.Print(value_, [value_, value])
    jl = tf.where(value >= value_, jm, jl)
    ju = tf.where(value < value_, jm, ju)
    return (jl, ju, value, array)
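# Hedged sketch of how _body might be driven: it is the body of a bisection
# search over a sorted 1-D `array`, narrowed with tf.while_loop until each
# bracket [jl, ju) has width 1. The names _cond, jl0 and ju0 and the initial
# bounds are assumptions, not part of the original code; `value` is assumed
# to be a tensor of query values.
def _cond(jl, ju, value, array):
    return tf.reduce_any(ju - jl > 1)

jl0 = tf.zeros_like(value, dtype=tf.int32)          # lower bracket
ju0 = tf.fill(tf.shape(value), tf.size(array) - 1)  # upper bracket
jl, ju, _, _ = tf.while_loop(_cond, _body, (jl0, ju0, value, array))
# jl now indexes the largest element of `array` that is <= value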
def length_penalty(sequence_lengths, penalty_factor):
    """Calculates the length penalty according to
    https://arxiv.org/abs/1609.08144

    Args:
      sequence_lengths: The sequence length of all hypotheses, a tensor
        of shape [beam_size, vocab_size].
      penalty_factor: A scalar that weights the length penalty.

    Returns:
      The length penalty factor, a tensor of shape [beam_size].
    """
    return tf.truncatediv(
        (5. + tf.cast(sequence_lengths, dtype=tf.float32))**penalty_factor,
        (5. + 1.)**penalty_factor)
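# Hedged worked example of the GNMT penalty above, with assumed values
# (for float arguments, tf.truncatediv reduces to ordinary division):
# a hypothesis of length 7 and penalty_factor 0.6 gives
# ((5 + 7) / (5 + 1)) ** 0.6 = 2 ** 0.6 ≈ 1.516, so longer hypotheses
# have their log-probabilities divided by a larger factor.
lengths = tf.constant([[7]])            # hypothetical [beam_size=1, 1] input
penalty = length_penalty(lengths, 0.6)  # ≈ [[1.516]]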
def wav_to_spectrogram(wav_data):
    # [samples_n, channels_n]
    tf.assert_equal(
        tf.shape(wav_data)[1], model.channels_n[-1],
        f"Audio data has {tf.shape(wav_data)[1]} channels, but needs to have "
        f"{model.channels_n[-1]} for the model")
    wav_data.set_shape([None, model.channels_n[-1]])

    # truncate audio (only a tiny bit) to a whole number of blocks
    samples_n = tf.shape(wav_data)[0]
    last_block = tf.truncatediv(samples_n, model.freq_n[-1])
    wav_data = wav_data[:last_block * model.freq_n[-1], :]

    # add batch dimension (t_to_repr expects it)
    wav_data = tf.expand_dims(wav_data, axis=0)
    mdct_norm = audio_representation.t_to_repr(wav_data)  # -1..1
    # [batches_n=1, blocks_n, freqs_n, channels_n]
    return mdct_norm
def get_key_points_from_score_maps(score_maps):
    """
    Arguments:
        @score_maps: of shape (N, H, W, C)
    Return:
        @key_points_xy: of shape (N, C, 2)
        @key_points_scores: of shape (N, C)
    """
    N, _, W, C = score_maps.shape.as_list()
    assert C == NUM_KEY_POINTS
    max_flat = tf.argmax(tf.reshape(score_maps, [N, -1, C]), axis=1)
    max_y = tf.truncatediv(max_flat, W)
    max_x = max_flat - max_y * W
    key_points_xy = tf.stack([max_x, max_y], axis=2)
    key_points_scores = tf.squeeze(tf.reduce_max(score_maps, axis=[1, 2]))
    return key_points_xy, key_points_scores
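# Hedged sketch (hypothetical numbers) of the flat-index decomposition above:
# with W = 5, a flat argmax index of 13 over the flattened H*W axis recovers
# row y = 13 // 5 = 2 and column x = 13 - 2 * 5 = 3.
flat = tf.constant(13, dtype=tf.int64)  # argmax returns int64
y = tf.truncatediv(flat, 5)             # 2
x = flat - y * 5                        # 3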
x = tf.add(a, b, name='add')
writer = tf.summary.FileWriter('./graphs/simple', tf.get_default_graph())
with tf.Session() as sess:
    print(sess.run(x))
writer.close()

# Example 2: The wonderful wizard of div
a = tf.constant([2, 2], name='a')
b = tf.constant([[0, 1], [2, 3]], name='b')
with tf.Session() as sess:
    print(sess.run(tf.div(b, a)))
    print(sess.run(tf.divide(b, a)))
    print(sess.run(tf.truediv(b, a)))
    print(sess.run(tf.floordiv(b, a)))
    print(sess.run(tf.truncatediv(b, a)))
    print(sess.run(tf.floor_div(b, a)))

# Example 3: multiplying tensors
a = tf.constant([10, 20], name='a')
b = tf.constant([2, 3], name='b')
with tf.Session() as sess:
    print(sess.run(tf.multiply(a, b)))
    print(sess.run(tf.tensordot(a, b, 1)))

# Example 4: Python native type
t_0 = 19
x = tf.zeros_like(t_0)
y = tf.ones_like(t_0)
writer = tf.summary.FileWriter('./graphs/simple', tf.get_default_graph())
with tf.Session() as sess:
    # writer = tf.summary.FileWriter('./graphs', sess.graph)
    print(sess.run(x))
writer.close()

# Example 2: the many variants of div
a = tf.constant([2, 2], name='a')
b = tf.constant([[0, 1], [2, 3]], name='b')
with tf.Session() as sess:
    print(sess.run(tf.div(b, a)))          # element-wise division, keeping the integer quotient
    print(sess.run(tf.divide(b, a)))       # element-wise division
    print(sess.run(tf.truediv(b, a)))      # element-wise true division
    # print(sess.run(tf.realdiv(b, a)))
    print(sess.run(tf.floordiv(b, a)))     # result rounded down, but dtype matches the inputs
    print(sess.run(tf.truncatediv(b, a)))  # element-wise division, truncated toward zero
    print(sess.run(tf.floor_div(b, a)))

# Example 3: multiplication
a = tf.constant([10, 20], name='a')
b = tf.constant([2, 3], name='b')
with tf.Session() as sess:
    print(sess.run(tf.multiply(a, b)))
    print(sess.run(tf.tensordot(a, b, 1)))

# Example 4: Python native types
t_0 = 19
x = tf.zeros_like(t_0)
y = tf.ones_like(t_0)
print(x)
print(y)
tf.minimum(x, y, name=None)  # element-wise minimum of two tensors (x < y ? x : y)

# trigonometric and inverse trigonometric functions
tf.cos(x, name=None)
tf.sin(x, name=None)
tf.tan(x, name=None)
tf.acos(x, name=None)
tf.asin(x, name=None)
tf.atan(x, name=None)

# others
tf.div(x, y, name=None)          # Python 2.7 division: x/y --> int, or x/float(y) --> float
tf.truediv(x, y, name=None)      # Python 3 division: x/y --> float
tf.floordiv(x, y, name=None)     # Python 3 floor division: x//y --> int
tf.realdiv(x, y, name=None)      # returns a tensor with the same type as x
tf.truncatediv(x, y, name=None)  # division truncated toward zero
tf.floor_div(x, y, name=None)
tf.truncatemod(x, y, name=None)  # remainder of truncated division
tf.floormod(x, y, name=None)     # remainder of floor division
tf.cross(x, y, name=None)        # pairwise cross product of 3-element vectors
tf.add_n(inputs, name=None)      # inputs: a list of Tensor objects, each with the same shape and type
tf.squared_difference(x, y, name=None)  # element-wise squared difference (x - y)^2

# matrix math functions
# matrix multiplication (tensors of rank >= 2)
tf.matmul(a, b, transpose_a=False, transpose_b=False, adjoint_a=False,
          adjoint_b=False, a_is_sparse=False, b_is_sparse=False, name=None)
# transpose; axes can be permuted by specifying e.g. perm=[1, 0]
tf.transpose(a, perm=None, name='transpose')
# transposes the last two dimensions of tensor a
tf.matrix_transpose(a, name='matrix_transpose')
# Matrix with two batch dimensions, x.shape is [1, 2, 3, 4]
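# The truncating and flooring division ops listed above only differ on
# negative operands; a short demo with assumed inputs (truncatediv rounds
# toward zero, floordiv rounds toward negative infinity):
x = tf.constant([-7, 7])
y = tf.constant([2, 2])
with tf.Session() as sess:
    print(sess.run(tf.truncatediv(x, y)))  # [-3  3]
    print(sess.run(tf.floordiv(x, y)))     # [-4  3]
    print(sess.run(tf.truncatemod(x, y)))  # [-1  1]
    print(sess.run(tf.floormod(x, y)))     # [ 1  1]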
'''
* tf.subtract: subtraction
* tf.multiply: multiplication
* tf.div: division following Python 2 semantics: x/y is int if x and y are
  both integers, otherwise float
* tf.divide: division
* tf.truediv: division with Python 3 semantics: x/y is float; for integer
  division use x//y or tf.floordiv
* tf.floordiv: division, rounded down to an integer
* tf.truncatediv: division, truncated toward zero
* tf.mod: modulo (remainder)
'''
x = tf.constant(7)
y = tf.constant(4)
mul = tf.multiply(x, y)
div = tf.div(x, y)
divide = tf.divide(x, y)
truediv = tf.truediv(x, y)
truncatediv = tf.truncatediv(x, y)
sess = tf.Session()
out = sess.run([mul, div, divide, truediv, truncatediv])
print(out)

'''
2. pairwise cross product
* tf.cross(a, b, name): computes the cross product of the 3-element vectors a and b
'''
a = tf.constant([1, 2, 3])
b = tf.constant([0, 2, 3])
cross = tf.cross(a, b)
out = sess.run(cross)
print(out)
'''
TensorFlow's zillion operations for division:
tf.div - TF style division
tf.divide - Python style division
'''
a = tf.constant([2, 2], name='a')
b = tf.constant([[0, 1], [2, 3]], name='b')
with tf.Session() as sess:
    print(sess.run(tf.div(b, a)))          # ==> [[0 0] [1 1]]
    print(sess.run(tf.divide(b, a)))       # ==> [[0. 0.5] [1. 1.5]]
    print(sess.run(tf.truediv(b, a)))      # ==> [[0. 0.5] [1. 1.5]]
    print(sess.run(tf.floordiv(b, a)))     # ==> [[0 0] [1 1]]
    # print(sess.run(tf.realdiv(b, a)))    # Error: only works for real values
    print(sess.run(tf.truncatediv(b, a)))  # ==> [[0 0] [1 1]]
    print(sess.run(tf.floor_div(b, a)))    # ==> [[0 0] [1 1]]

'''
tf.add_n: allows adding multiple tensors.
tf.add_n([a, b, c]) => a + b + c
'''
a = tf.constant(1, name="a")
b = tf.constant(2, name="b")
c = tf.constant(3, name="c")
add_n = tf.add_n([a, b, c])
with tf.Session() as sess:
    print(sess.run(add_n))
with tf.Session() as sess:
    # writer = tf.summary.FileWriter('./graphs', sess.graph)
    print(sess.run(x))
writer.close()  # close the writer when you're done using it

# Example 2: The wonderful wizard of div
a = tf.constant([2, 2], name='a')
b = tf.constant([[0, 1], [2, 3]], name='b')
with tf.Session() as sess:
    print(sess.run(tf.div(b, a)))
    print(sess.run(tf.divide(b, a)))
    print(sess.run(tf.truediv(b, a)))
    print(sess.run(tf.floordiv(b, a)))
    # print(sess.run(tf.realdiv(b, a)))
    print(sess.run(tf.truncatediv(b, a)))
    print(sess.run(tf.floor_div(b, a)))

# Example 3: multiplying tensors
a = tf.constant([10, 20], name='a')
b = tf.constant([2, 3], name='b')
with tf.Session() as sess:
    print(sess.run(tf.multiply(a, b)))
    print(sess.run(tf.tensordot(a, b, 1)))

# Example 4: Python native type
t_0 = 19
x = tf.zeros_like(t_0)  # ==> 0
y = tf.ones_like(t_0)   # ==> 1
def beam_search_step(time_, logits, beam_state, config):
    """Performs a single step of Beam Search Decoding.

    Args:
      time_: Beam search time step, should start at 0. At time 0 we assume
        that all beams are equal and consider only the first beam for
        continuations.
      logits: Logits at the current time step. A tensor of shape
        `[B, vocab_size]`
      beam_state: Current state of the beam search. An instance of `BeamState`
      config: An instance of `BeamSearchConfig`

    Returns:
      A `BeamSearchStepOutput` and a new beam state.
    """
    # Calculate the current lengths of the predictions
    prediction_lengths = beam_state.lengths
    previously_finished = beam_state.finished

    # Calculate the total log probs for the new hypotheses
    # Final Shape: [beam_width, vocab_size]
    probs = tf.nn.log_softmax(logits)
    probs = mask_probs(probs, config.eos_token, previously_finished)
    total_probs = tf.expand_dims(beam_state.log_probs, 1) + probs

    # Calculate the continuation lengths
    # We add 1 to all continuations that are not EOS and were not
    # finished previously
    lengths_to_add = tf.one_hot([config.eos_token] * config.beam_width,
                                config.vocab_size, 0, 1)
    add_mask = (1 - tf.cast(previously_finished, dtype=tf.int32))
    lengths_to_add = tf.expand_dims(add_mask, 1) * lengths_to_add
    new_prediction_lengths = tf.expand_dims(prediction_lengths, 1) + lengths_to_add

    # Calculate the scores for each beam
    scores = hyp_score(log_probs=total_probs,
                       sequence_lengths=new_prediction_lengths,
                       config=config)
    scores_flat = tf.reshape(scores, [-1])
    # During the first time step we only consider the initial beam
    scores_flat = tf.cond(pred=tf.convert_to_tensor(value=time_) > 0,
                          true_fn=lambda: scores_flat,
                          false_fn=lambda: scores[0])

    # Pick the next beams according to the specified successors function
    next_beam_scores, word_indices = config.choose_successors_fn(
        scores_flat, config)
    next_beam_scores.set_shape([config.beam_width])
    word_indices.set_shape([config.beam_width])

    # Pick out the probs, beam_ids, and states according to the chosen
    # predictions
    total_probs_flat = tf.reshape(total_probs, [-1], name="total_probs_flat")
    next_beam_probs = tf.gather(total_probs_flat, word_indices)
    next_beam_probs.set_shape([config.beam_width])
    next_word_ids = tf.math.mod(word_indices, config.vocab_size)
    next_beam_ids = tf.truncatediv(word_indices, config.vocab_size)

    # Append new ids to current predictions
    next_finished = tf.logical_or(
        tf.gather(beam_state.finished, next_beam_ids),
        tf.equal(next_word_ids, config.eos_token))

    # Calculate the length of the next predictions.
    # 1. Finished beams remain unchanged
    # 2. Beams that are now finished (EOS predicted) remain unchanged
    # 3. Beams that are not yet finished have their length increased by 1
    lengths_to_add = tf.cast(tf.not_equal(next_word_ids, config.eos_token),
                             dtype=tf.int32)
    lengths_to_add = (1 - tf.cast(next_finished, dtype=tf.int32)) * lengths_to_add
    next_prediction_len = tf.gather(beam_state.lengths, next_beam_ids)
    next_prediction_len += lengths_to_add

    next_state = BeamSearchState(log_probs=next_beam_probs,
                                 lengths=next_prediction_len,
                                 finished=next_finished)

    output = BeamSearchStepOutput(scores=next_beam_scores,
                                  predicted_ids=next_word_ids,
                                  beam_parent_ids=next_beam_ids)
    return output, next_state
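# Hedged illustration (assumed vocab_size = 10, not from the original code)
# of the flat-index decomposition above: a flat index of 23 into the
# [beam_width * vocab_size] scores selects word id 23 % 10 = 3, expanded
# from parent beam 23 // 10 = 2.
word_indices = tf.constant([23])
next_word_ids = tf.math.mod(word_indices, 10)     # [3]
next_beam_ids = tf.truncatediv(word_indices, 10)  # [2]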
t2_1 = tf.ones_like(t2, name="t2_1")  # 3x3 tensor, all elem = True

a = tf.constant([2, 2], name='a')
b = tf.constant([[0, 1], [2, 3]], name='b')
c = tf.constant([10, 20], name='c')
d = tf.constant([2, 3], name='d')

ops = [
    # division
    tf.div(b, a),
    tf.divide(b, a),
    tf.truediv(b, a),
    tf.floordiv(b, a),
    # tf.realdiv(b, a),  # only for real values
    tf.truncatediv(b, a),
    # multiplication
    tf.multiply(c, d),      # element-wise
    tf.tensordot(c, d, 1),  # outputs 1 value
    tf.add(t1_0, t1_1),
]

writer = tf.summary.FileWriter(utils.logdir, tf.get_default_graph())
with tf.Session() as sess:
    # run() output is a numpy array
    [print(sess.run(op)) for op in ops]
writer.close()