def tf_is_emotion(image_file, emotion, match_emotion):
    [match] = tf.py_function(lambda x: is_emotion(x, match_emotion),
                             [emotion], [tf.bool])
    return match
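# A minimal, self-contained sketch of how a boolean py_function like the one
# above is used as a Dataset.filter predicate. `is_emotion` here is a toy
# stand-in for the original helper, which is not shown in this snippet.
import tensorflow as tf

def is_emotion(emotion, match_emotion):
    # Runs eagerly inside tf.py_function, so .numpy() is available.
    return emotion.numpy().decode('utf-8') == match_emotion

files = tf.constant(['a.jpg', 'b.jpg'])
emotions = tf.constant(['happy', 'sad'])
ds = tf.data.Dataset.from_tensor_slices((files, emotions))
ds = ds.filter(lambda f, e: tf_is_emotion(f, e, 'happy'))  # keeps ('a.jpg', 'happy')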
def tf_serialize_example(f0, f1):
    tf_string = tf.py_function(
        serialize_example,
        (f0, f1),    # Pass these args to the above function.
        tf.string)   # The return type is `tf.string`.
    return tf.reshape(tf_string, ())  # The result is a scalar.
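# Sketch of the TFRecord round trip this wrapper supports. The
# serialize_example stub below is illustrative; the original's version is not
# shown in this snippet.
import tensorflow as tf

def serialize_example(f0, f1):
    feature = {
        'f0': tf.train.Feature(float_list=tf.train.FloatList(value=[f0.numpy()])),
        'f1': tf.train.Feature(int64_list=tf.train.Int64List(value=[f1.numpy()])),
    }
    proto = tf.train.Example(features=tf.train.Features(feature=feature))
    return proto.SerializeToString()

ds = tf.data.Dataset.from_tensor_slices(
    (tf.random.uniform([4]), tf.random.uniform([4], maxval=10, dtype=tf.int64)))
serialized = ds.map(tf_serialize_example)
tf.data.experimental.TFRecordWriter('example.tfrecord').write(serialized)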
def load_from_txt(X_fpath, Y_fpath, word2vec_fpath, vocab_fpath,
                  reverse_context=True, buffer_size=10000, batch_size=32):
    def txt_generator():
        with open(X_fpath, 'r') as f:
            X = f.readlines()
        with open(Y_fpath, 'r') as f:
            Y = f.readlines()
        print('Loaded data from txt file')
        X = [x.strip('\n') for x in X]
        Y = [y.strip('\n') for y in Y]
        print('Removed new line characters')
        # Pipeline elements
        ws = WhiteSpaceTokenizer()
        padder = Padder('<pad>')
        pipe = Pipeline(steps=[('ws', ws), ('pad', padder)])
        X_processed = pipe.transform(X)
        Y_processed = pipe.transform(Y)
        print('ws tokenized and padded')
        for i, x in enumerate(X_processed):
            yield (x, Y_processed[i])

    # Word2Vec used to vectorise the encoder and decoder inputs
    word2vec = Word2Vec(special_vectors={'unknown': 0, '<sos>': 1})
    model = KeyedVectors.load_word2vec_format(word2vec_fpath)
    word2vec.set_model(model)
    print('Loaded word2vec mapping')

    inttk = IntegerTokenizer(vocab_fpath, add_to_vocab_if_not_present=False)
    ft = featurize(ftz=[word2vec.tf_map, inttk.tf_map])

    dataset = tf.data.Dataset.from_generator(
        txt_generator,
        output_types=(tf.string, tf.string),
        output_shapes=(tf.TensorShape((None,)), tf.TensorShape((None,))),
    )
    print('Created tf.data.Dataset')
    dataset = dataset.map(create_decoder_target)
    dataset = dataset.map(
        lambda x1, x2, y: tf.py_function(
            func=ft.tf_map, inp=[x1, x2, y],
            Tout=(tf.float32, tf.float32, tf.int32)),
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    dataset = dataset.map(lambda x1, x2, y: ((x1, x2), y))
    if reverse_context:
        dataset = dataset.map(flip_context_func)

    # The dataset is fairly small (c. 200 MB), so cache it. Note that cache()
    # and prefetch() return new datasets, so their results must be reassigned.
    dataset = dataset.cache()
    # Shuffle and batch the dataset.
    dataset = dataset.shuffle(buffer_size).batch(batch_size, drop_remainder=True)
    # Prefetch lets the model fetch batches in the background while training.
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset
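# A minimal self-contained version of the same pattern (generator -> Dataset
# -> py_function map -> cache/shuffle/batch/prefetch). All names below are toy
# stand-ins for the custom tokenizer/featurizer classes used above.
import tensorflow as tf

def gen():
    for x, y in [('hello world', 'bonjour monde'), ('good day', 'bonne journee')]:
        yield x, y

def toy_featurize(x, y):
    # Toy featurizer: string lengths in place of real word vectors / ids.
    return (tf.cast(tf.strings.length(x), tf.float32),
            tf.cast(tf.strings.length(y), tf.int32))

ds = tf.data.Dataset.from_generator(gen, output_types=(tf.string, tf.string),
                                    output_shapes=((), ()))
ds = ds.map(lambda x, y: tf.py_function(toy_featurize, [x, y],
                                        (tf.float32, tf.int32)))
ds = ds.cache().shuffle(4).batch(2, drop_remainder=True)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)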
def wrapper(steps, times, values):
    # Use a tf.py_function to prevent AutoGraph from compiling the method.
    return tf.py_function(func, inp=(steps, times, values),
                          Tout=(steps.dtype, times.dtype, values.dtype))
def tf_write_summary(tar_real, predictions, step):
    return tf.py_function(write_summary, [tar_real, predictions, step],
                          Tout=[tf.float32, tf.float32])
if lib in ["tf_decode_wav"]: dataset = dataset.map( lambda x: loaders.load_tf_decode_wav(x), num_parallel_calls=1) elif lib in ["tfio_fromaudio"]: dataset = dataset.map( lambda x: loaders.load_tfio_fromaudio(x, args.ext), num_parallel_calls=1) elif lib in ["tfio_fromffmpeg"]: dataset = dataset.map( lambda x: loaders.load_tfio_fromffmpeg(x), num_parallel_calls=1) else: loader_function = getattr(loaders, 'load_' + lib) dataset = dataset.map(lambda filename: tf.py_function( _make_py_loader_function(loader_function), [filename], [tf.float32]), num_parallel_calls=1) dataset = dataset.apply(tf.data.experimental.ignore_errors()) start = time.time() for i in range(repeat): for audio in dataset: value = tf.reduce_max(audio) if value: append = True end = time.time()
def _process_text_map_fn(self, text, label):
    processed_text, label = tf.py_function(self._process_text,
                                           inp=[text, label],
                                           Tout=(tf.float32, tf.int64))
    return processed_text, label
def tf_file_exists(filepath):
    return tf.py_function(lambda x: os.path.exists(x.numpy()),
                          inp=[filepath], Tout=tf.bool)
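# Usage sketch: because tf_file_exists returns a scalar tf.bool, it can be
# passed straight to Dataset.filter to drop missing files (paths below are
# illustrative).
import tensorflow as tf

ds = tf.data.Dataset.from_tensor_slices(['real_file.png', 'missing_file.png'])
ds = ds.filter(tf_file_exists)  # keeps only paths that exist on disk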
def tf_encode(pt, en):
    result_pt, result_en = tf.py_function(encode, [pt, en],
                                          [tf.int64, tf.int64])
    result_pt.set_shape([None])
    result_en.set_shape([None])
    return result_pt, result_en
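# Why the set_shape calls matter: tf.py_function outputs have unknown static
# shape, and downstream ops such as padded_batch need at least the rank. A
# runnable sketch with a toy `encode` standing in for the original tokenizer:
import tensorflow as tf

def encode(pt, en):
    # Runs eagerly inside py_function: derive toy ids from the raw bytes.
    return list(pt.numpy()), list(en.numpy())

ds = tf.data.Dataset.from_tensor_slices((['ola mundo'], ['hello world']))
ds = ds.map(tf_encode).padded_batch(2, padded_shapes=([None], [None]))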
def model_part3(results):
    """Apply NMS on the RoIs.

    :param results:
    :return:
    """
    per_roi = results[4]
    per_roi = per_roi[:, 1:]
    score = tf.reshape(results[2], (-1, 1))
    roi = tf.concat((per_roi, score), axis=-1)
    # Apply NMS on the RoIs to keep the boxes with the highest scores.
    # keep = tf.py_function(nms, [roi, 0.3], tf.int32)
    # keep = nms(roi, 0.3)
    score = tf.reshape(results[2], (-1,))
    keep = tf.image.non_max_suppression(per_roi, score,
                                        max_output_size=20, iou_threshold=0.3)
    ovr_threshold = tf.convert_to_tensor(0.5)
    for i in keep:
        keep1 = tf.numpy_function(nms1, [i, keep, roi, ovr_threshold], tf.int32)
        box_up_num = len(keep1) + 1
        # Get an unsuppressed box.
        mask_i = results[3][i]   # (h, w, 2)
        roi_i = results[4][i]    # (n, 4)
        score_i = results[2][i]  # (n,)
        mask_i, roi_i = tf.py_function(mask_transform, [mask_i, roi_i],
                                       [tf.float32, tf.float32])
        mask_i = tf.pad(tensor=mask_i,
                        paddings=[[roi_i[-4], 0], [roi_i[-3], 0], [0, 0]])
        mask_i *= score_i
        # Fuse the unsuppressed box with the suppressed boxes by weighted averaging.
        for j in keep1:
            mask_j = results[3][j]
            roi_j = results[4][j]
            score_j = results[2][j]  # scores live at index 2 (cf. score_i above)
            mask_j, roi_j = tf.py_function(mask_transform, [mask_j, roi_j],
                                           [tf.float32, tf.float32])
            rb = tf.maximum(roi_i[-2:], roi_j[-2:])
            pad_rb = rb - roi_j[-2:]
            mask_j = tf.pad(tensor=mask_j,
                            paddings=[[roi_j[-4], pad_rb[0]],
                                      [roi_j[-3], pad_rb[1]], [0, 0]])
            mask_i = mask_i + mask_j * score_j
        mask_i /= box_up_num
        mask = mask_i[roi_i[-4]:, roi_i[-3]:, :]
        results[3][i] = mask

    # Gather the kept boxes, class scores, offsets and masks, and compute the
    # boxes' inside_weights and outside_weights.
    roi_item = results[4][keep[0]]     # (4,)
    offset_item = results[5][keep[0]]  # (4,)
    cls_item = results[0][keep[0]]
    rois = tf.reshape(roi_item, (1, -1))
    offset = tf.reshape(offset_item, (1, -1))
    cls = tf.reshape(cls_item, (1, -1))
    roi_mask_item = results[3][keep[0]]
    roi_mask = tf.reshape(roi_mask_item, (1, 5, 5, 2))
    for i in range(1, len(keep)):
        roi_item = results[4][keep[i]]
        roi_item = tf.reshape(roi_item, (1, -1))
        rois = tf.concat((rois, roi_item), axis=0)         # (n, 4)
        offset_item = results[5][keep[i]]
        offset_item = tf.reshape(offset_item, (1, -1))
        offset = tf.concat((offset, offset_item), axis=0)  # (n, 4)
        cls_item = results[0][keep[i]]
        cls_item = tf.reshape(cls_item, (1, -1))
        cls = tf.concat((cls, cls_item), axis=0)           # (n, k+1)
        roi_mask_item = results[3][keep[i]]
        roi_mask_item = tf.reshape(roi_mask_item, (1, 5, 5, 2))
        roi_mask = tf.concat((roi_mask, roi_mask_item), axis=0)
    return rois, cls, offset, roi_mask
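# Standalone illustration of the tf.image.non_max_suppression call used above,
# with made-up boxes: the second box overlaps the first beyond the threshold,
# so only indices 0 and 2 survive.
import tensorflow as tf

boxes = tf.constant([[0., 0., 10., 10.],
                     [1., 1., 11., 11.],
                     [50., 50., 60., 60.]])
scores = tf.constant([0.9, 0.8, 0.7])
keep = tf.image.non_max_suppression(boxes, scores,
                                    max_output_size=20, iou_threshold=0.3)
print(keep.numpy())  # [0, 2]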
def tf_load_audio(filepath):
    return tf.py_function(lambda x: load_audio(x.numpy()),
                          inp=[filepath], Tout=[tf.float32, tf.int32])
def load_image(path, label):
    return tf.py_function(_load_image, (path, label), (tf.float32, tf.int32))
def tf_serialize_example(f0, f1, f2, f3):
    tf_string = tf.py_function(serialize_example, (f0, f1, f2, f3), tf.string)
    return tf.reshape(tf_string, ())
def f(x):
    tf.py_function(side_effect, inp=[x], Tout=[])
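# Sketch of how a no-output py_function like f is exercised: wrapped in a
# tf.function, the stateful py_function still runs even though nothing is
# returned. `side_effect` below is a toy stand-in that records its inputs.
import tensorflow as tf

seen = []

def side_effect(x):
    seen.append(int(x.numpy()))

g = tf.function(f)
g(tf.constant(1))
g(tf.constant(2))
print(seen)  # [1, 2]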
def load_image(path, label):
    image, label = tf.py_function(_load_image, (path, label),
                                  (tf.float32, tf.int32))
    image.set_shape([None, None, None])
    label.set_shape([])
    return tf.image.resize(image, INPUT_IMAGE_SIZE), label
def inTrainFilter(ident, _dummy_1, _dummy_2):
    # Use tf.logical_not: Python's `not` on a tensor fails when this predicate
    # is traced inside Dataset.filter.
    return tf.logical_not(tf.py_function(inValidationFilter, [ident], tf.bool))
def alaska_tf(y_true, y_val):
    """Wrapper for the above function."""
    return tf.py_function(func=alaska_wuac_metric, inp=[y_true, y_val],
                          Tout=tf.float32)
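# Hedged usage sketch: a py_function-wrapped metric can be handed to Keras
# like any other metric function, at the cost of running Python on every
# batch. The stub below stands in for the real alaska_wuac_metric.
import numpy as np
import tensorflow as tf

def alaska_wuac_metric(y_true, y_val):
    # Toy stand-in: plain accuracy computed in NumPy.
    return np.float32(np.mean(y_true.numpy() == np.round(y_val.numpy())))

model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[alaska_tf])
model.fit(np.random.rand(8, 4).astype('float32'),
          np.random.randint(0, 2, (8, 1)), epochs=1, verbose=0)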
def _augment_and_encode(x, y):
    if augment:
        x = aug_func(x)
    y = tf.py_function(_encode_text, (y,), Tout=tf.int64)
    return x, y
    return z

# (2) Build the network model
# Create the model
# Placeholders
X = tf.compat.v1.placeholder("float")
Y = tf.compat.v1.placeholder("float")
# Model parameters
W = tf.Variable(tf.random.normal([1]), name="weight")
b = tf.Variable(tf.zeros([1]), name="bias")
# Forward structure
# z = tf.multiply(X, W) + b
z = tf.py_function(my_py_func, [X, W, b], tf.float32)
global_step = tf.Variable(0, name='global_step', trainable=False)
# Backward optimization
cost = tf.reduce_mean(input_tensor=tf.square(Y - z))
learning_rate = 0.01
optimizer = tf.compat.v1.train.GradientDescentOptimizer(
    learning_rate).minimize(cost, global_step)  # Gradient descent
# Learning parameters
training_epochs = 34
display_step = 2
savedir = "log/"
# Create the saver; max_to_keep=1 keeps at most one checkpoint file.
saver = tf.compat.v1.train.Saver(
    tf.compat.v1.global_variables(), max_to_keep=1)
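# The session-based training loop that conventionally follows this kind of
# setup (a sketch only; train_X and train_Y are assumed NumPy arrays, and the
# checkpoint name is illustrative):
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        if epoch % display_step == 0:
            loss = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", epoch + 1, "cost =", loss)
            saver.save(sess, savedir + "model.ckpt", global_step=global_step)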
def tf_serialize_example(image_str, label):
    tf_string = tf.py_function(serialize_example, (image_str, label), tf.string)
    return tf.reshape(tf_string, ())  # The result is a scalar.
def mapped_function(s):
    # Do some hard pre-processing.
    tf.py_function(lambda: time.sleep(0.03), [], ())
    return s
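# Sketch of the benchmark such a sleep-based map function is used for: each
# element costs about 30 ms, so a sequential map over 10 elements takes about
# 0.3 s, while num_parallel_calls lets the sleeps overlap.
import time
import tensorflow as tf

ds = tf.data.Dataset.range(10).map(mapped_function)
start = time.time()
for _ in ds:
    pass
print('sequential: %.2fs' % (time.time() - start))

ds_par = tf.data.Dataset.range(10).map(
    mapped_function, num_parallel_calls=tf.data.experimental.AUTOTUNE)
start = time.time()
for _ in ds_par:
    pass
print('parallel:   %.2fs' % (time.time() - start))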
def load_file_and_preprocess(path):
    # These are the output dtypes for tf.py_function, not shapes.
    out_types = ((tf.float16, tf.int16) if config['float16']
                 else (tf.float32, tf.int32))
    return tf.py_function(wrapped_loader, [path], out_types)
def tf_encode(data):
    result_input, result_label = tf.py_function(encode, [data[0], data[1]],
                                                [tf.int64, tf.int64])
    return result_input, result_label
def tf_iou(y_true, y_pred):
    iou = tf.py_function(get_iou, [y_true, y_pred], tf.float32)
    return iou
def tf_encode(doc, summary):
    return tf.py_function(encode, [doc, summary], [tf.int64, tf.int64])
def tf_encode(txt, eq):
    return tf.py_function(encode, [txt, eq], [tf.int64, tf.int64])
    return blocks, masks, label

listOfFiles = os.listdir("/tf/kaggle/tf_records")
pattern = "*.tfrecords"
listOfFiles = [
    "/tf/kaggle/tf_records/" + x
    for x in listOfFiles if fnmatch.fnmatch(x, pattern)
]
print(listOfFiles)
dataset = tf.data.TFRecordDataset(listOfFiles)
print(dataset)
# train = dataset.map(parse_tfrecord)
train = dataset.map(lambda x1: tf.py_function(
    func=parse_tfrecord, inp=[x1], Tout=[tf.float32, tf.int32, tf.int64]))
train = train.batch(4)
print(train)

layer_1 = tf.keras.layers.Conv2D(6, (7, 7), activation='relu',
                                 input_shape=(7, 7, 1280))
layer_12 = tf.keras.layers.Conv2D(6, (1, 1), activation='relu',
                                  input_shape=(1, 1, 6))
layer_2 = tf.keras.layers.Conv3D(6, (max_num_blocks, 1, 1), activation='relu',
                                 input_shape=(7, 7, 1280))

for blocks, masks, labels in train:
def _deploy_exe_info(self, losses, info):
    with tf.name_scope("deploy_exe_info"):
        hp = self.hparams
        if self.trainable:  # Train
            self.train_loss = losses
            params = tf.trainable_variables()
            if hp.tunable:
                learning_rate = hp.tune_rate
            else:
                learning_rate = hp.learning_rate
            self.learning_rate = tf.constant(learning_rate, dtype=tf.float32)
            # Warm-up
            self.learning_rate = self._get_learning_rate_warmup()
            # Decay
            self.learning_rate = self._get_learning_rate_decay()
            # Optimizer
            opt = tf.train.MomentumOptimizer(self.learning_rate,
                                             hp.momentum_factor)
            # Gradient
            gradients = tf.gradients(self.train_loss, params)
            # Gradient clip
            clipped_grads, grad_norm_summaries, grad_norm = helper.gradient_clip(
                gradients, max_gradient_norm=hp.max_grad_norm)
            # Gradient norm
            for summary in grad_norm_summaries:
                self._add_to_summaries(summary)
            self.grad_norm = grad_norm
            # Apply update to params
            self.update = opt.apply_gradients(
                zip(clipped_grads, params), global_step=self.global_step)
            # Trainable params summary
            print("# Trainable variables")
            print("Format: <name>, <shape>, <(soft) device placement>")
            for param in params:
                self.histogram.update({param.name: param})
                print("  %s, %s, %s" % (param.name, str(param.get_shape()),
                                        param.op.device))
            self.histogram.update(train_loss=self.train_loss,
                                  learning_rate=self.learning_rate)
            if hp.forward_rcnn:
                self.class_predicts = self.reverse_cate_table.lookup(
                    tf.to_int64(info["class_predicts"]))
                self.detected_images = tf.py_function(
                    misc.draw_boxes_on_image,
                    [self.images_data, info["bbox_labels"],
                     info["class_scores"], self.class_predicts,
                     self.im_info, hp.pixel_mean],
                    Tout=tf.float32)
            self.train_summary = self._config_train_summary()
        elif self.predicable:  # Infer
            stddevs = tf.tile(tf.constant(hp.bbox_norm_stddevs),
                              multiples=hp.num_class)
            means = tf.tile(tf.constant(hp.bbox_norm_means),
                            multiples=hp.num_class)
            deltas = info["bbox_predicts"]
            # Restore bbox predicts
            deltas = tf.add(tf.multiply(deltas, stddevs), means)
            info["bbox_predicts"] = deltas
            rois = info["rois"]
            self.class_scores = info["class_scores"]
            self.class_predicts = self.reverse_cate_table.lookup(
                tf.to_int64(info["class_predicts"]))
            # Get predicted ground-truth bbox
            self.bboxes = proposal_util.bboxes_regression(rois, deltas)
            self.detected_images = tf.py_function(
                misc.draw_boxes_on_image,
                [self.images_data, self.bboxes, self.class_scores,
                 self.class_predicts, self.im_info, hp.pixel_mean],
                Tout=tf.float32)
            self.infer_summary = self._config_infer_summary()
        else:  # Eval
            rois = info["rois"]
            deltas = info["bbox_predicts"]
            self.eval_loss = losses
            bboxes = proposal_util.bboxes_regression(rois, deltas)
            self.accuracy = misc.mean_avg_overlap(bboxes, self.bbox_labels)
            self.eval_summary = self._config_eval_summary()
train_ds, test_ds, val_ds = get_open_shelf_dataset()


def resize_eager_test(image, label):
    image = cv2.resize(image, (96, 96))
    image = (image / 255).astype(dtype=np.float32)
    return image, label


def resize(image, label):
    image = cv2.resize(image.numpy(), (96, 96))
    return image, label


# val_ds = val_ds.map(lambda item: tf.numpy_function(
#     resize_eager_test, [item['image'], item['label']], [tf.float32, tf.int64]))
val_ds = val_ds \
    .map(lambda item: tf.py_function(resize, [item['image'], item['label']],
                                     [tf.uint8, tf.int64])) \
    .map(lambda image, label: tf.py_function(data_aug_v2, [image, label],
                                             [tf.float32, tf.int64])) \
    .batch(batch_size=1)

index = 0
for i in val_ds:
    matplotlib.image.imsave('./images/{}-augmentation.jpg'.format(index),
                            i[0][0].numpy())
    index += 1
def tf_file_exists(image_file, label):
    [exists] = tf.py_function(file_exists, [image_file], [tf.bool])
    return exists