def _default_config(args):
    """Create the baseline OneFlow function config.

    Uses the consistent logical view and float32 as defaults, and turns on
    automatic mixed precision when ``args.use_fp16`` is set.
    """
    cfg = flow.function_config()
    cfg.default_data_type(flow.float)
    cfg.default_logical_view(flow.scope.consistent_view())
    if args.use_fp16:
        cfg.enable_auto_mixed_precision(True)
    return cfg
def _default_config(args):
    """Create the OneFlow function config for this job.

    Applies, in order: consistent view + float32 defaults, optional AMP
    (``args.use_fp16``), optional XLA JIT with add-to-output fusion
    (``args.use_xla``), and optional TensorRT with INT8 calibration
    (``args.use_tensorrt`` / ``args.use_int8_online`` / ``args.use_int8_offline``).

    Raises:
        ValueError: if both ``use_int8_offline`` and ``use_int8_online`` are set.
        Exception: if an INT8 flag is set while ``use_tensorrt`` is False.
    """
    # Fail fast on an inconsistent flag combination, BEFORE any INT8 state
    # is applied to the config. The original performed this check last,
    # after tensorrt.use_int8()/int8_calibration() had already run.
    if args.use_int8_offline and args.use_int8_online:
        raise ValueError(
            "You cannot use use_int8_offline or use_int8_online at the same time!"
        )
    config = flow.function_config()
    config.default_logical_view(flow.scope.consistent_view())
    config.default_data_type(flow.float)
    if args.use_fp16:
        config.enable_auto_mixed_precision(True)
    if args.use_xla:
        config.use_xla_jit(True)
        config.enable_fuse_add_to_output(True)
    if args.use_tensorrt:
        config.use_tensorrt(True)
        if args.use_int8_online or args.use_int8_offline:
            config.tensorrt.use_int8()
        if args.use_int8_offline:
            # Calibration table produced by a prior online-calibration run.
            int8_calibration_path = "./int8_calibration"
            config.tensorrt.int8_calibration(int8_calibration_path)
    elif args.use_int8_online or args.use_int8_offline:
        # INT8 is a TensorRT feature; without use_tensorrt it is meaningless.
        raise Exception(
            "You can set use_int8_online or use_int8_offline only after use_tensorrt is True!"
        )
    return config
def GetFunctionConfig(args):
    """Build the job's function config.

    Mixed precision follows ``args.use_fp16``; when ``args.use_xla`` is set,
    XLA JIT plus the add-to-output and model-update fusions are enabled.
    """
    cfg = flow.function_config()
    cfg.enable_auto_mixed_precision(args.use_fp16)
    if not args.use_xla:
        return cfg
    cfg.use_xla_jit(True)
    cfg.enable_fuse_add_to_output(True)
    cfg.enable_fuse_model_update_ops(True)
    return cfg
def make_func_config(args):
    """Assemble the training function config.

    Enables AMP when requested, prunes parallel-cast ops, turns on the
    standard fusion passes, and keeps the non-distributed optimizer off.
    """
    cfg = flow.function_config()
    if args.use_fp16:
        cfg.enable_auto_mixed_precision(True)
    cfg.prune_parallel_cast_ops(True)
    cfg.enable_fuse_add_to_output(True)
    cfg.enable_fuse_model_update_ops(True)
    cfg.enable_fuse_cast_scale(True)
    # Explicitly DISABLE the non-distributed optimizer (the previous comment
    # claimed the flag was being turned on, but the call passes False).
    cfg.enable_non_distributed_optimizer(False)
    return cfg
def get_train_config():
    """Training job config with float32 as the default data type."""
    train_cfg = flow.function_config()
    train_cfg.default_data_type(flow.float)
    return train_cfg
if name is None: name = "Mini_Reader_uniqueID" return (flow.user_op_builder(name).Op("MiniReader").Output("out").Attr( "data_dir", minidata_dir).Attr("data_part_num", data_part_num).Attr( "batch_size", batch_size).Attr("part_name_prefix", part_name_prefix).Attr( "random_shuffle", random_shuffle).Attr( "shuffle_after_epoch", shuffle_after_epoch).Attr( "part_name_suffix_length", part_name_suffix_length).Attr("shuffle_buffer_size", shuffle_buffer_size). Build().InferAndTryRun().RemoteBlobList()[0]) config = flow.function_config() config.default_data_type(flow.double) @flow.global_function("train", config) def test_job() -> tp.Numpy: batch_size = 10 with flow.scope.placement("cpu", "0:0"): miniRecord = MiniReader( "./", batch_size=batch_size, data_part_num=2, part_name_suffix_length=3, random_shuffle=True, shuffle_after_epoch=True, )
def get_val_config():
    """Validation job config: consistent logical view, float32 defaults."""
    val_cfg = flow.function_config()
    val_cfg.default_data_type(flow.float)
    val_cfg.default_logical_view(flow.scope.consistent_view())
    return val_cfg
def get_train_config():
    """Training job config: mirrored logical view with float32 defaults."""
    cfg = flow.function_config()
    cfg.default_logical_view(flow.scope.mirrored_view())
    cfg.default_data_type(flow.float)
    return cfg
from Scnet.utils.clsidx_to_labels import clsidx_2_labels


def load_image(image_path='data/img_red.png'):
    """Load an image, resize it to 224x224, normalize it, and return an
    NCHW float32 array of shape (1, 3, 224, 224).

    NOTE(review): reads ``args.rgb_mean`` / ``args.rgb_std`` from module
    scope — presumably per-channel dataset statistics; confirm at call site.
    """
    print(image_path)
    im = Image.open(image_path)
    im = im.resize((224, 224))
    im = im.convert('RGB')  # some images are single-channel; without this conversion an error is raised
    im = np.array(im).astype('float32')
    im = (im - args.rgb_mean) / args.rgb_std
    im = np.transpose(im, (2, 0, 1))  # HWC -> CHW
    im = np.expand_dims(im, axis=0)  # add leading batch dimension
    return np.ascontiguousarray(im, 'float32')


@flow.global_function("predict", flow.function_config())
def InferenceNet(images: tp.Numpy.Placeholder((1, 3, 224, 224), dtype=flow.float)) -> tp.Numpy:
    """Prediction job: run resnet50 on a single image batch and return the
    softmax class probabilities."""
    body, logits = resnet50(images, args, training=False)
    predictions = flow.nn.softmax(logits)
    return predictions


def main():
    # NOTE(review): this definition looks truncated here — the original
    # likely continues past the visible statements (e.g. using `image`).
    flow.env.log_dir(args.log_dir)
    assert os.path.isdir(args.model_load_dir)
    check_point = flow.train.CheckPoint()
    check_point.load(args.model_load_dir)
    image = load_image(args.image_path)
def GetFunctionConfig(args):
    """Return a function config whose AMP setting mirrors ``args.use_fp16``."""
    cfg = flow.function_config()
    cfg.enable_auto_mixed_precision(args.use_fp16)
    return cfg
w = img_width h = img_height origin_h = img.shape[1] # orignal height origin_w = img.shape[2] # orignal width resize_img = resize_image(img, origin_h, origin_w, h, w) # resize img # normalize resize_img[0] = (resize_img[0] - norm_mean[0]) / norm_std[0] resize_img[1] = (resize_img[1] - norm_mean[1]) / norm_std[1] resize_img[2] = (resize_img[2] - norm_mean[2]) / norm_std[2] result_list.append(resize_img) # image list results = np.asarray(result_list).astype(np.float32) return results @flow.global_function(flow.function_config()) def faceseg_job(image=flow.FixedTensorDef((1,3,256,256), dtype=flow.float)): feature = LinkNet34(image,trainable=False,batch_size=1) # use linknet34 model to segment face return feature def faceSeg(img_path,model_para_path): # input image preprocess query_images = batch_image_preprocess(img_path,256,256) feature = faceseg_job(query_images).get() return feature