def post_process(output):
    """Post-process a raw OpenPose network output into peak / heat / PAF maps.

    Builds a small TF graph (upsample -> gaussian smooth -> 3x3 max-pool
    non-maximum suppression) and evaluates it in a fresh session.

    NOTE(review): a new tf.Session and graph nodes are created on every call,
    which is expensive; for repeated use, build the graph once and reuse it.
    """
    ## input is (3,656,368), output is (1,46,82,57)
    # Reshape the flat network output into NHWC: 19 heatmap + 38 PAF channels.
    output = np.reshape(output, (1, 46, 82, 57))
    tensor_output = tf.convert_to_tensor(output)
    tensor_heatMat = tensor_output[:, :, :, :19]   # keypoint heatmaps
    tensor_pafMat = tensor_output[:, :, :, 19:]    # part-affinity fields
    #upsample_size = tf.placeholder(dtype=tf.int32, shape=(2,), name='upsample_size')
    # Half of the (width=368, height=656) input resolution.
    sw, sh = int(368 / 2), int(656 / 2)
    # NOTE(review): tf.image.resize_area expects size as [height, width];
    # passing [sw, sh] = [184, 328] looks transposed relative to the
    # (H=656, W=368) input noted above — TODO confirm intended order.
    upsample_size = tf.constant([sw, sh], dtype=tf.int32)
    tensor_heatMat_up = tf.image.resize_area(tensor_heatMat, upsample_size,
                                             align_corners=False, name='upsample_heatmat')
    tensor_pafMat_up = tf.image.resize_area(tensor_pafMat, upsample_size,
                                            align_corners=False, name='upsample_pafmat')
    # Gaussian-smooth the heatmaps (Smoother: 25-tap kernel, sigma 3.0).
    smoother = Smoother({'data': tensor_heatMat_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()
    # Non-maximum suppression: keep only pixels equal to their 3x3 max.
    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat, window_shape=(3, 3),
                                      pooling_type='MAX', padding='SAME')
    tensor_peaks = tf.where(tf.equal(gaussian_heatMat, max_pooled_in_tensor),
                            gaussian_heatMat, tf.zeros_like(gaussian_heatMat))
    heatMat = pafMat = None  # NOTE(review): dead locals, never read before return
    ss = time.time()
    with tf.Session() as sess:
        # Smoother's gaussian kernel is a TF variable and must be initialized.
        sess.run(tf.global_variables_initializer())
        peaks = tensor_peaks.eval(session=sess)
        heatMat_up = tensor_heatMat_up.eval(session=sess)
        pafMat_up = tensor_pafMat_up.eval(session=sess)
    print('the shape of p,h,f', peaks.shape, heatMat_up.shape, pafMat_up.shape, time.time() - ss, peaks)
    #return tensor_peaks, tensor_heatMat_up, tensor_pafMat_up
    return peaks, heatMat_up, pafMat_up
def __init__(self, graph_path, target_size=(320, 240), tf_config=None):
    """Load a frozen OpenPose graph and prepare the post-processing sub-graph.

    :param graph_path: path to the frozen .pb graph file.
    :param target_size: (width, height) of the default network input.
    :param tf_config: optional tf.ConfigProto for the persistent session.
    """
    self.target_size = target_size

    # load graph
    logger.info('loading graph from %s(default size=%dx%d)' % (graph_path, target_size[0], target_size[1]))
    with tf.gfile.GFile(graph_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # Import into the current default graph and keep one long-lived session.
    self.graph = tf.get_default_graph()
    tf.import_graph_def(graph_def, name='TfPoseEstimator')
    self.persistent_sess = tf.Session(graph=self.graph, config=tf_config)

    # use network input node and output node
    self.tensor_image = self.graph.get_tensor_by_name('TfPoseEstimator/image:0')
    self.tensor_output = self.graph.get_tensor_by_name('TfPoseEstimator/Openpose/concat_stage7:0')
    # Channels 0..18 are keypoint heatmaps; channels 19..56 are PAFs.
    self.tensor_heatMat = self.tensor_output[:, :, :, :19]
    self.tensor_pafMat = self.tensor_output[:, :, :, 19:]
    # upsample_size sets the (height, width) the result maps are resized to.
    self.upsample_size = tf.placeholder(dtype=tf.int32, shape=(2,), name='upsample_size')
    self.tensor_heatMat_up = tf.image.resize_area(self.tensor_output[:, :, :, :19], self.upsample_size,
                                                  align_corners=False, name='upsample_heatmat')
    self.tensor_pafMat_up = tf.image.resize_area(self.tensor_output[:, :, :, 19:], self.upsample_size,
                                                 align_corners=False, name='upsample_pafmat')
    # Gaussian filter for the heatmap; after filtering we get the pure heatmap.
    smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()
    # 3x3 max-pool NMS: keep only local-maximum pixels as peaks.
    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat, window_shape=(3, 3),
                                      pooling_type='MAX', padding='SAME')
    self.tensor_peaks = tf.where(tf.equal(gaussian_heatMat, max_pooled_in_tensor),
                                 gaussian_heatMat, tf.zeros_like(gaussian_heatMat))

    # Filled in later by inference calls.
    self.heatMat = self.pafMat = None

    # warm-up network using random input: initialize only the still-uninitialized
    # variables (the Smoother kernel), then run at full / half / quarter upsample sizes.
    self.persistent_sess.run(tf.variables_initializer(
        [v for v in tf.global_variables() if
         v.name.split(':')[0] in [x.decode('utf-8') for x in
                                  self.persistent_sess.run(tf.report_uninitialized_variables())]
         ])
    )
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
            self.upsample_size: [target_size[1], target_size[0]]
        }
    )
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
            self.upsample_size: [target_size[1] // 2, target_size[0] // 2]
        }
    )
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
            self.upsample_size: [target_size[1] // 4, target_size[0] // 4]
        }
    )
def __init__(self, graph_path, target_size=(320, 240), tf_config=None):
    """Variant that builds the pose graph inside a dedicated tf.Graph.

    NOTE(review): this block ends with an unterminated ''' — the matching
    closer (presumably fencing off the warm-up code) lies outside this view.
    """
    self.target_size = target_size

    # load graph
    logger.info('loading graph from %s(default size=%dx%d)' % (graph_path, target_size[0], target_size[1]))
    with tf.gfile.GFile(graph_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    #self.graph = tf.get_default_graph()
    self.graph = tf.Graph()
    # NOTE(review): this first import targets the *default* graph, not
    # self.graph, and duplicates the import done below — likely redundant.
    tf.import_graph_def(graph_def, name='TfPoseEstimator')
    self.persistent_sess = tf.Session(graph=self.graph, config=tf_config)
    with self.graph.as_default():
        tf.import_graph_def(graph_def, name='TfPoseEstimator')
        # for op in self.graph.get_operations():
        #     print("operations :", op.name)
        # for ts in [n.name for n in tf.get_default_graph().as_graph_def().node]:
        #     print("nodes :", ts)
        self.tensor_image = self.graph.get_tensor_by_name('TfPoseEstimator/image:0')
        print("tensor output:", self.tensor_image)
        self.tensor_output = self.graph.get_tensor_by_name('TfPoseEstimator/Openpose/concat_stage7:0')
        # Channels 0..18 heatmaps, 19..56 PAFs.
        self.tensor_heatMat = self.tensor_output[:, :, :, :19]
        self.tensor_pafMat = self.tensor_output[:, :, :, 19:]
        # (height, width) the result maps are resized to at run time.
        self.upsample_size = tf.placeholder(dtype=tf.int32, shape=(2,), name='upsample_size')
        self.tensor_heatMat_up = tf.image.resize_area(self.tensor_output[:, :, :, :19], self.upsample_size,
                                                      align_corners=False, name='upsample_heatmat')
        self.tensor_pafMat_up = tf.image.resize_area(self.tensor_output[:, :, :, 19:], self.upsample_size,
                                                     align_corners=False, name='upsample_pafmat')
        # Gaussian smoothing + 3x3 max-pool NMS to extract peaks.
        smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
        gaussian_heatMat = smoother.get_output()
        max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat, window_shape=(3, 3),
                                          pooling_type='MAX', padding='SAME')
        self.tensor_peaks = tf.where(tf.equal(gaussian_heatMat, max_pooled_in_tensor),
                                     gaussian_heatMat, tf.zeros_like(gaussian_heatMat))
        self.heatMat = self.pafMat = None
        # warm-up: initialize only variables still reported uninitialized.
        self.persistent_sess.run(tf.variables_initializer(
            [v for v in tf.global_variables() if
             v.name.split(':')[0] in [x.decode('utf-8') for x in
                                      self.persistent_sess.run(tf.report_uninitialized_variables())]
             ])
        )
        '''
def run_test(image, model):
    """Run a TFLite pose model on `image` and post-process its output.

    :param image: input tensor matching the TFLite model's input shape/dtype.
    :param model: path to the .tflite model file.
    :return: (peaks, heatMat_up, pafMat_up) numpy arrays.

    Fix vs. original: the original created a tf.Session before building any
    graph, closed it unused, then opened a second session that was never
    closed (leaked). A single `with tf.Session()` now owns the run.
    """
    # Run the TFLite interpreter to get the raw (N, H, W, 57) network output.
    interpreter = tf.contrib.lite.Interpreter(model_path=model)
    interpreter.allocate_tensors()
    input_index = interpreter.get_input_details()[0]["index"]
    output_index = interpreter.get_output_details()[0]["index"]
    interpreter.set_tensor(input_index, image)
    interpreter.invoke()
    tensor_output = interpreter.get_tensor(output_index)

    # Post-processing graph: upsample heat/PAF maps, gaussian-smooth, NMS.
    upsample_size = [116, 108]  # fixed (height, width) for the upsampled maps
    tensor_heatMat_up = tf.image.resize_area(tensor_output[:, :, :, :19], upsample_size,
                                             align_corners=False, name='upsample_heatmat')
    tensor_pafMat_up = tf.image.resize_area(tensor_output[:, :, :, 19:], upsample_size,
                                            align_corners=False, name='upsample_pafmat')
    smoother = Smoother({'data': tensor_heatMat_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()
    # Keep only 3x3 local maxima as keypoint peaks.
    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat, window_shape=(3, 3),
                                      pooling_type='MAX', padding='SAME')
    tensor_peaks = tf.where(tf.equal(gaussian_heatMat, max_pooled_in_tensor),
                            gaussian_heatMat, tf.zeros_like(gaussian_heatMat))

    # One session, properly closed; initializer covers the Smoother kernel.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        peaks, heatMat_up, pafMat_up = sess.run(
            [tensor_peaks, tensor_heatMat_up, tensor_pafMat_up])
    return peaks, heatMat_up, pafMat_up
def __init__(self, graph_fn):
    """Wrap an external forward function with a TF post-processing graph.

    :param graph_fn: callable producing the raw (N, H, W, 57) network output;
                     stored as self.forward_fn and invoked elsewhere.
    """
    self.forward_fn = graph_fn
    # Post-processing lives in its own graph; raw output is fed via placeholder.
    self.graph = tf.Graph()
    with self.graph.as_default():
        # 57 channels = 19 keypoint heatmaps + 38 PAF channels.
        self.tensor_output = tf.placeholder(dtype=tf.float32,
                                            shape=(None, None, None, 57))
        self.tensor_heatMat = self.tensor_output[:, :, :, :19]
        self.tensor_pafMat = self.tensor_output[:, :, :, 19:]
        # (height, width) the maps are resized to at run time.
        self.upsample_size = tf.placeholder(dtype=tf.int32,
                                            shape=(2, ),
                                            name='upsample_size')
        self.tensor_heatMat_up = tf.image.resize_area(
            self.tensor_output[:, :, :, :19],
            self.upsample_size,
            align_corners=False,
            name='upsample_heatmat')
        self.tensor_pafMat_up = tf.image.resize_area(
            self.tensor_output[:, :, :, 19:],
            self.upsample_size,
            align_corners=False,
            name='upsample_pafmat')
        # Gaussian smoothing (25-tap, sigma 3.0) then 3x3 max-pool NMS.
        gaussian_heatMat = Smoother({
            'data': self.tensor_heatMat_up
        }, 25, 3.0).get_output()
        max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat, (3, 3), 'MAX',
                                          'SAME')
        self.tensor_peaks = tf.where(
            tf.equal(gaussian_heatMat, max_pooled_in_tensor),
            gaussian_heatMat, tf.zeros_like(gaussian_heatMat))

        self.persistent_sess = tf.Session(graph=self.graph)
        # Initialize only the variables still reported uninitialized
        # (i.e. the Smoother's gaussian kernel).
        self.persistent_sess.run(
            tf.variables_initializer([
                v for v in tf.global_variables()
                if v.name.split(':')[0] in [
                    x.decode('utf-8') for x in self.persistent_sess.run(
                        tf.report_uninitialized_variables())
                ]
            ]))

    # Filled in later by inference calls.
    self.heatMat = self.pafMat = None
def __init__(self, graph_path, target_size=(320, 240), tf_config=None,
             trt_bool=False):
    """Load a frozen OpenPose graph, optionally TF-TRT optimized.

    :param graph_path: path to the frozen .pb graph file.
    :param target_size: (width, height) of the default network input.
    :param tf_config: optional tf.ConfigProto for the session.
    :param trt_bool: if True, convert the graph with TensorRT (FP16).
    """
    self.target_size = target_size

    # load graph
    logger.info('loading graph from %s(default size=%dx%d)' %
                (graph_path, target_size[0], target_size[1]))
    with tf.gfile.GFile(graph_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    if trt_bool is True:
        # Rewrite the graph with TF-TRT; dynamic ops let TRT build engines
        # lazily at first run for the actual input shapes.
        output_nodes = ["Openpose/concat_stage7"]
        graph_def = trt.create_inference_graph(
            graph_def,
            output_nodes,
            max_batch_size=1,
            max_workspace_size_bytes=1 << 20,
            precision_mode="FP16",
            # precision_mode="INT8",
            minimum_segment_size=3,
            is_dynamic_op=True,
            maximum_cached_engines=int(1e3),
            use_calibration=True,
        )

    self.graph = tf.get_default_graph()
    tf.import_graph_def(graph_def, name='TfPoseEstimator')
    self.persistent_sess = tf.Session(graph=self.graph, config=tf_config)

    # NOTE(review): debug dump of every node name; noisy — consider removing
    # or switching to logger.debug.
    for ts in [n.name for n in tf.get_default_graph().as_graph_def().node]:
        print(ts)

    self.tensor_image = self.graph.get_tensor_by_name(
        'TfPoseEstimator/image:0')
    self.tensor_output = self.graph.get_tensor_by_name(
        'TfPoseEstimator/Openpose/concat_stage7:0')
    # Channels 0..18 heatmaps, 19..56 PAFs.
    self.tensor_heatMat = self.tensor_output[:, :, :, :19]
    self.tensor_pafMat = self.tensor_output[:, :, :, 19:]
    # (height, width) the result maps are resized to at run time.
    self.upsample_size = tf.placeholder(dtype=tf.int32,
                                        shape=(2, ),
                                        name='upsample_size')
    self.tensor_heatMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, :19],
        self.upsample_size,
        align_corners=False,
        name='upsample_heatmat')
    self.tensor_pafMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, 19:],
        self.upsample_size,
        align_corners=False,
        name='upsample_pafmat')

    # Gaussian smoothing; TRT path passes an explicit channel count (19).
    if trt_bool is True:
        smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0, 19)
    else:
        smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()
    # 3x3 max-pool NMS: keep only local-maximum pixels as peaks.
    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat,
                                      window_shape=(3, 3),
                                      pooling_type='MAX',
                                      padding='SAME')
    self.tensor_peaks = tf.where(
        tf.equal(gaussian_heatMat, max_pooled_in_tensor), gaussian_heatMat,
        tf.zeros_like(gaussian_heatMat))

    # Filled in later by inference calls.
    self.heatMat = self.pafMat = None

    # warm-up: initialize only still-uninitialized variables, then run at
    # full / half / quarter upsample sizes to build any lazy TRT engines.
    self.persistent_sess.run(
        tf.variables_initializer([
            v for v in tf.global_variables()
            if v.name.split(':')[0] in [
                x.decode('utf-8') for x in self.persistent_sess.run(
                    tf.report_uninitialized_variables())
            ]
        ]))
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [
                np.ndarray(shape=(target_size[1], target_size[0], 3),
                           dtype=np.float32)
            ],
            self.upsample_size: [target_size[1], target_size[0]]
        })
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [
                np.ndarray(shape=(target_size[1], target_size[0], 3),
                           dtype=np.float32)
            ],
            self.upsample_size: [target_size[1] // 2, target_size[0] // 2]
        })
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [
                np.ndarray(shape=(target_size[1], target_size[0], 3),
                           dtype=np.float32)
            ],
            self.upsample_size: [target_size[1] // 4, target_size[0] // 4]
        })

    # logs
    if self.tensor_image.dtype == tf.quint8:
        logger.info('quantization mode enabled.')
def __init__(self, graph_path=None, meta_graph_path=None,
             target_size=(320, 240), tf_config=None):
    """Load a pose model from either a frozen graph or a metagraph.

    :param graph_path: path to a frozen .pb graph file.
    :param meta_graph_path: path to a .meta metagraph (checkpoint-based).
    :param target_size: (width, height) of the default network input.
    :param tf_config: optional tf.ConfigProto for the session.
    :raises ValueError: if neither path is provided.

    Fix vs. original: the meta_graph_path branch never assigned self.graph,
    so the get_tensor_by_name calls below raised AttributeError; self.graph
    is now set to the default graph the metagraph was imported into.
    """
    self.target_size = target_size
    if graph_path:
        # load frozen graph
        logger.info('loading graph from %s(default size=%dx%d)' %
                    (graph_path, target_size[0], target_size[1]))
        with tf.gfile.GFile(graph_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        self.graph = tf.get_default_graph()
        tf.import_graph_def(graph_def, name='TfPoseEstimator')
        self.persistent_sess = tf.Session(graph=self.graph,
                                          config=tf_config)
    elif meta_graph_path:
        logger.info("Loading metagraph from %s", meta_graph_path)
        self.persistent_sess = tf.Session(config=tf_config)
        new_saver = tf.train.import_meta_graph(meta_graph_path)
        # NOTE(review): restore() is given the checkpoint *directory*; a
        # checkpoint prefix (e.g. tf.train.latest_checkpoint(dir)) is the
        # usual argument — TODO confirm against how checkpoints are saved.
        new_saver.restore(self.persistent_sess,
                          os.path.dirname(meta_graph_path))
        # import_meta_graph populates the default graph; keep a handle so
        # the tensor lookups below work on this branch too.
        self.graph = tf.get_default_graph()
    else:
        raise ValueError("Provide either graph_path or meta_graph_path")

    # for op in self.graph.get_operations():
    #     print(op.name)
    # for ts in [n.name for n in tf.get_default_graph().as_graph_def().node]:
    #     print(ts)
    self.tensor_image = self.graph.get_tensor_by_name(
        'TfPoseEstimator/image:0')
    self.tensor_output = self.graph.get_tensor_by_name(
        'TfPoseEstimator/Openpose/concat_stage7:0')
    # Channels 0..18 heatmaps, 19..56 PAFs.
    self.tensor_heatMat = self.tensor_output[:, :, :, :19]
    self.tensor_pafMat = self.tensor_output[:, :, :, 19:]
    # (height, width) the result maps are resized to at run time.
    self.upsample_size = tf.placeholder(dtype=tf.int32,
                                        shape=(2, ),
                                        name='upsample_size')
    self.tensor_heatMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, :19],
        self.upsample_size,
        align_corners=False,
        name='upsample_heatmat')
    self.tensor_pafMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, 19:],
        self.upsample_size,
        align_corners=False,
        name='upsample_pafmat')
    # Gaussian smoothing then 3x3 max-pool NMS to extract peaks.
    smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()
    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat,
                                      window_shape=(3, 3),
                                      pooling_type='MAX',
                                      padding='SAME')
    self.tensor_peaks = tf.where(
        tf.equal(gaussian_heatMat, max_pooled_in_tensor), gaussian_heatMat,
        tf.zeros_like(gaussian_heatMat))

    # Filled in later by inference calls.
    self.heatMat = self.pafMat = None

    # warm-up: initialize only still-uninitialized variables, then run at
    # full / half / quarter upsample sizes.
    self.persistent_sess.run(
        tf.variables_initializer([
            v for v in tf.global_variables()
            if v.name.split(':')[0] in [
                x.decode('utf-8') for x in self.persistent_sess.run(
                    tf.report_uninitialized_variables())
            ]
        ]))
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [
                np.ndarray(shape=(target_size[1], target_size[0], 3),
                           dtype=np.float32)
            ],
            self.upsample_size: [target_size[1], target_size[0]]
        })
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [
                np.ndarray(shape=(target_size[1], target_size[0], 3),
                           dtype=np.float32)
            ],
            self.upsample_size: [target_size[1] // 2, target_size[0] // 2]
        })
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [
                np.ndarray(shape=(target_size[1], target_size[0], 3),
                           dtype=np.float32)
            ],
            self.upsample_size: [target_size[1] // 4, target_size[0] // 4]
        })
def __init__(self, graph_path, target_size=(320, 240), tf_config=None,
             trt_bool=False):
    """Load a frozen OpenPose graph; with trt_bool, load a pre-converted
    TensorRT graph stored next to it as "<name>_rt.pb".

    :param graph_path: path to the frozen .pb graph file.
    :param target_size: (width, height) of the default network input.
    :param tf_config: optional tf.ConfigProto; defaults are built below.
    :param trt_bool: if True, load the "_rt.pb" sibling and cap GPU memory.
    """
    self.target_size = target_size

    # load graph
    logger.info('loading graph from %s(default size=%dx%d)' %
                (graph_path, target_size[0], target_size[1]))
    graph_def = tf.GraphDef()
    if trt_bool is True:
        # Derive the pre-converted TensorRT graph path: "<name>_rt.pb".
        rt_graph_path = os.path.splitext(graph_path)[0]  # import os first
        rt_graph_path = rt_graph_path + "_rt.pb"
        with tf.gfile.GFile(rt_graph_path, 'rb') as f:
            graph_def.ParseFromString(f.read())
    else:
        with tf.gfile.GFile(graph_path, 'rb') as f:
            graph_def.ParseFromString(f.read())

    self.graph = tf.get_default_graph()
    tf.import_graph_def(graph_def, name='TfPoseEstimator')
    if tf_config is None:
        if (trt_bool is True):
            # Leave headroom for TensorRT's own GPU allocations.
            tf_config = tf.compat.v1.ConfigProto(gpu_options=tf.GPUOptions(
                per_process_gpu_memory_fraction=0.50))
        else:
            tf_config = tf.compat.v1.ConfigProto()
            tf_config.gpu_options.allow_growth = True
    #sess = tf.Session(config=tf_config)
    self.persistent_sess = tf.Session(graph=self.graph, config=tf_config)

    #for ts in [n.name for n in tf.get_default_graph().as_graph_def().node]:
    #    print(ts)
    self.tensor_image = self.graph.get_tensor_by_name(
        'TfPoseEstimator/image:0')
    self.tensor_output = self.graph.get_tensor_by_name(
        'TfPoseEstimator/Openpose/concat_stage7:0')
    # Channels 0..18 heatmaps, 19..56 PAFs.
    self.tensor_heatMat = self.tensor_output[:, :, :, :19]
    self.tensor_pafMat = self.tensor_output[:, :, :, 19:]
    # (height, width) the result maps are resized to at run time.
    self.upsample_size = tf.placeholder(dtype=tf.int32,
                                        shape=(2, ),
                                        name='upsample_size')
    self.tensor_heatMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, :19],
        self.upsample_size,
        align_corners=False,
        name='upsample_heatmat')
    self.tensor_pafMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, 19:],
        self.upsample_size,
        align_corners=False,
        name='upsample_pafmat')
    # Gaussian smoothing; TRT path passes an explicit channel count (19).
    if trt_bool is True:
        smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0, 19)
    else:
        smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()
    # 3x3 max-pool NMS: keep only local-maximum pixels as peaks.
    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat,
                                      window_shape=(3, 3),
                                      pooling_type='MAX',
                                      padding='SAME')
    self.tensor_peaks = tf.where(
        tf.equal(gaussian_heatMat, max_pooled_in_tensor), gaussian_heatMat,
        tf.zeros_like(gaussian_heatMat))

    # Filled in later by inference calls.
    self.heatMat = self.pafMat = None

    # warm-up: initialize remaining variables, then a single full-size run.
    self.persistent_sess.run(
        tf.variables_initializer([
            v for v in tf.global_variables()
            if v.name.split(':')[0] in [
                x.decode('utf-8') for x in self.persistent_sess.run(
                    tf.report_uninitialized_variables())
            ]
        ]))
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [
                np.ndarray(shape=(target_size[1], target_size[0], 3),
                           dtype=np.float32)
            ],
            self.upsample_size: [target_size[1], target_size[0]]
        })
    # Remaining warm-up passes disabled (kept for reference):
    """
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
            self.upsample_size: [target_size[1] // 2, target_size[0] // 2]
        }
    )
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
            self.upsample_size: [target_size[1] // 4, target_size[0] // 4]
        }
    )
    """
    # logs
    if self.tensor_image.dtype == tf.quint8:
        logger.info('quantization mode enabled.')
def __init__(self, graph_path, target_size=(320, 240), tf_config=None):
    """Variant where the network runs in an external TensorRT engine; TF is
    used only for post-processing of the raw output fed via a placeholder.

    :param graph_path: unused here beyond logging (frozen-graph load is
                       disabled; inference comes from self.engine).
    """
    self.target_size = target_size

    # load graph
    logger.info('loading graph from %s(default size=%dx%d)' %
                (graph_path, target_size[0], target_size[1]))
    #with tf.gfile.GFile(graph_path, 'rb') as f:
    #    graph_def = tf.GraphDef()
    #    graph_def.ParseFromString(f.read())
    self.graph = tf.get_default_graph()
    #tf.import_graph_def(graph_def, name='TfPoseEstimator')
    self.persistent_sess = tf.Session(graph=self.graph, config=tf_config)

    # for op in self.graph.get_operations():
    #     print(op.name)
    # for ts in [n.name for n in tf.get_default_graph().as_graph_def().node]:
    #     print(ts)
    #self.tensor_image = self.graph.get_tensor_by_name('TfPoseEstimator/image:0')
    #self.tensor_output = self.graph.get_tensor_by_name('TfPoseEstimator/Openpose/concat_stage7:0')
    # Raw engine output is fed in here; fixed shape (1, 34, 20, 57).
    self.tensor_output = tf.placeholder(dtype=tf.float32,
                                        shape=[1, 34, 20, 57])
    # NOTE(review): channel layout is reversed vs. the other variants —
    # here the first 38 channels are PAFs and the last 19 are heatmaps;
    # presumably this matches the TensorRT engine's output order — confirm.
    self.tensor_heatMat = self.tensor_output[:, :, :, 38:]
    self.tensor_pafMat = self.tensor_output[:, :, :, :38]
    # (height, width) the result maps are resized to at run time.
    self.upsample_size = tf.placeholder(dtype=tf.int32,
                                        shape=(2, ),
                                        name='upsample_size')
    self.tensor_heatMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, 38:],
        self.upsample_size,
        align_corners=False,
        name='upsample_heatmat')
    self.tensor_pafMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, :38],
        self.upsample_size,
        align_corners=False,
        name='upsample_pafmat')
    # Gaussian smoothing then 3x3 max-pool NMS to extract peaks.
    smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()
    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat,
                                      window_shape=(3, 3),
                                      pooling_type='MAX',
                                      padding='SAME')
    self.tensor_peaks = tf.where(
        tf.equal(gaussian_heatMat, max_pooled_in_tensor), gaussian_heatMat,
        tf.zeros_like(gaussian_heatMat))

    # Filled in later by inference calls.
    self.heatMat = self.pafMat = None
    logger.info('Done loading graph, warming up now')

    # warm-up: initialize remaining variables (the Smoother kernel).
    self.persistent_sess.run(
        tf.variables_initializer([
            v for v in tf.global_variables()
            if v.name.split(':')[0] in [
                x.decode('utf-8') for x in self.persistent_sess.run(
                    tf.report_uninitialized_variables())
            ]
        ]))
    # The actual network lives in a TensorRT engine (hard-coded device path).
    self.engine = InferenceEngine('/home/nvidia/cmu.engine')
    logger.info('1 warm ups done')
    # Remaining warm-up passes disabled; they fed self.tensor_image, which
    # does not exist on this code path.
    #self.persistent_sess.run(
    #    [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
    #    feed_dict={
    #        self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
    #        self.upsample_size: [target_size[1], target_size[0]]
    #    }
    #)
    #logger.info('2 warm ups done')
    #self.persistent_sess.run(
    #    [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
    #    feed_dict={
    #        self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
    #        self.upsample_size: [target_size[1] // 2, target_size[0] // 2]
    #    }
    #)
    #logger.info('3 warm ups done')
    #self.persistent_sess.run(
    #    [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
    #    feed_dict={
    #        self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
    #        self.upsample_size: [target_size[1] // 4, target_size[0] // 4]
    #    }
    #)
    logger.info('all warm ups done')
def initialize_hyperparams(self, target_size=(320, 240), tf_config=None):
    """Build the post-processing sub-graph and warm up the network.

    Assumes self.graph and self.persistent_sess were already set up by the
    caller (e.g. in __init__ after importing the frozen graph).

    :param target_size: (width, height) of the default network input.
    :param tf_config: unused here — the session is assumed pre-created.
    """
    logger.debug('ENTERED INITIALIZE_HYPERPARAMS() METHOD')
    self.target_size = target_size
    #self.persistent_sess = tf.Session(graph=self.graph, config=tf_config)

    # for op in self.graph.get_operations():
    #     print(op.name)
    # for ts in [n.name for n in tf.get_default_graph().as_graph_def().node]:
    #     print(ts)
    self.tensor_image = self.graph.get_tensor_by_name(
        'TfPoseEstimator/image:0')
    self.tensor_output = self.graph.get_tensor_by_name(
        'TfPoseEstimator/Openpose/concat_stage7:0')
    # Channels 0..18 heatmaps, 19..56 PAFs.
    self.tensor_heatMat = self.tensor_output[:, :, :, :19]
    self.tensor_pafMat = self.tensor_output[:, :, :, 19:]
    # (height, width) the result maps are resized to at run time.
    self.upsample_size = tf.placeholder(dtype=tf.int32,
                                        shape=(2, ),
                                        name='upsample_size')
    self.tensor_heatMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, :19],
        self.upsample_size,
        align_corners=False,
        name='upsample_heatmat')
    self.tensor_pafMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, 19:],
        self.upsample_size,
        align_corners=False,
        name='upsample_pafmat')
    # Gaussian smoothing then 3x3 max-pool NMS to extract peaks.
    smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()
    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat,
                                      window_shape=(3, 3),
                                      pooling_type='MAX',
                                      padding='SAME')
    self.tensor_peaks = tf.where(
        tf.equal(gaussian_heatMat, max_pooled_in_tensor), gaussian_heatMat,
        tf.zeros_like(gaussian_heatMat))

    # Filled in later by inference calls.
    self.heatMat = self.pafMat = None

    # warm-up
    logger.debug('START WARMUP 1')
    # NOTE(review): initializes *all* global variables, unlike the sibling
    # variants that filter to uninitialized ones (filter kept commented out).
    self.persistent_sess.run(
        tf.variables_initializer([v for v in tf.global_variables()]))
    #if
    #v.name.split(':')[0] in [x.decode('utf-8') for x in
    #self.persistent_sess.run(tf.report_uninitialized_variables())]
    #])
    #)
    logger.debug('START WARMUP 2')
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [
                np.ndarray(shape=(target_size[1], target_size[0], 3),
                           dtype=np.float32)
            ],
            self.upsample_size: [target_size[1], target_size[0]]
        })
    logger.debug('START WARMUP 3')
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [
                np.ndarray(shape=(target_size[1], target_size[0], 3),
                           dtype=np.float32)
            ],
            self.upsample_size: [target_size[1] // 2, target_size[0] // 2]
        })
    logger.debug('START WARMUP 4')
    self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={
            self.tensor_image: [
                np.ndarray(shape=(target_size[1], target_size[0], 3),
                           dtype=np.float32)
            ],
            self.upsample_size: [target_size[1] // 4, target_size[0] // 4]
        })
    logger.debug('END WARMUPS')
def __init__(self, blob, target_size=(224, 224), tf_config=None,
             is_mvnc=False):
    """Load a pose model either onto a Movidius NCS (MVNC) stick or as a
    frozen TF graph, and build the TF post-processing sub-graph.

    :param blob: MVNC graph blob bytes (is_mvnc=True) or serialized
                 GraphDef bytes (is_mvnc=False).
    :param target_size: (width, height) of the default network input.
    :param tf_config: optional tf.ConfigProto for the session.
    :param is_mvnc: select the Movidius path.
    """
    # earlier target_size=(320,240)
    if is_mvnc:
        # Network runs on the NCS stick; TF only post-processes its output.
        if mvnc is None:
            print(
                "Please install MVNC libraries to use --is-mvnc option...")
            quit(-1)
        # `devices` is expected to be set at module level — confirm.
        self.device = mvnc.Device(devices[0])
        self.device.openDevice()
        self.obj = self.device.AllocateGraph(blob)
        self.graph = tf.get_default_graph()
        self.persistent_sess = tf.Session(graph=self.graph,
                                          config=tf_config)
        self.tensor_image = None
        # Raw NCS output is fed here; maps are 1/8 the input resolution.
        self.tensor_output = tf.placeholder(tf.float16,
                                            shape=(1, target_size[0] // 8,
                                                   target_size[1] // 8, 57),
                                            name='vectmap')  #57?
    else:
        # Plain TF path: deserialize the frozen graph from the blob.
        self.device = None
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(blob)
        self.graph = tf.get_default_graph()
        tf.import_graph_def(graph_def, name='TfPoseEstimator')
        self.obj = self.persistent_sess = tf.Session(graph=self.graph,
                                                     config=tf_config)
        # Tensor names differ between exported graphs; try the common
        # variants in order.
        try:
            self.tensor_image = self.graph.get_tensor_by_name(
                'TfPoseEstimator/image:0')
        except KeyError as e:
            self.tensor_image = self.graph.get_tensor_by_name(
                'TfPoseEstimator/split:0')
        try:
            self.tensor_output = self.graph.get_tensor_by_name(
                'Openpose/concat_stage7:0')
        except KeyError as e:
            self.tensor_output = self.graph.get_tensor_by_name(
                'TfPoseEstimator/Openpose/concat_stage7:0')
    # for op in self.graph.get_operations():
    #     print(op.name)
    # for ts in [n.name for n in tf.get_default_graph().as_graph_def().node]:
    #     print(ts)
    # Channels 0..18 heatmaps, 19..56 PAFs.
    self.tensor_heatMat = self.tensor_output[:, :, :, :19]
    self.tensor_pafMat = self.tensor_output[:, :, :, 19:]
    # (height, width) the result maps are resized to at run time.
    self.upsample_size = tf.placeholder(dtype=tf.int32,
                                        shape=(2, ),
                                        name='upsample_size')
    self.tensor_heatMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, :19],
        self.upsample_size,
        align_corners=False,
        name='upsample_heatmat')
    self.tensor_pafMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, 19:],
        self.upsample_size,
        align_corners=False,
        name='upsample_pafmat')
    # Gaussian smoothing then 3x3 max-pool NMS to extract peaks.
    smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()
    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat,
                                      window_shape=(3, 3),
                                      pooling_type='MAX',
                                      padding='SAME')
    self.tensor_peaks = tf.where(
        tf.equal(gaussian_heatMat, max_pooled_in_tensor), gaussian_heatMat,
        tf.zeros_like(gaussian_heatMat))

    # Filled in later by inference calls.
    self.heatMat = self.pafMat = None

    # warm-up: the two paths feed different placeholders (raw output maps
    # for MVNC, an input image for plain TF), each at three upsample sizes.
    if is_mvnc:
        self.persistent_sess.run(
            tf.variables_initializer([
                v for v in tf.global_variables()
                if v.name.split(':')[0] in [
                    x.decode('utf-8') for x in self.persistent_sess.run(
                        tf.report_uninitialized_variables())
                ]
            ]))
        self.persistent_sess.run(
            [
                self.tensor_peaks, self.tensor_heatMat_up,
                self.tensor_pafMat_up
            ],
            feed_dict={
                self.tensor_output: [
                    np.ndarray(shape=(target_size[1] // 8,
                                      target_size[0] // 8, 57),
                               dtype=np.float16)
                ],
                self.upsample_size: [target_size[1], target_size[0]
                                     ]  #[target_size[1] // 8, target_size[0] // 8]
            })
        self.persistent_sess.run(
            [
                self.tensor_peaks, self.tensor_heatMat_up,
                self.tensor_pafMat_up
            ],
            feed_dict={
                self.tensor_output: [
                    np.ndarray(shape=(target_size[1] // 8,
                                      target_size[0] // 8, 57),
                               dtype=np.float16)
                ],
                self.upsample_size: [target_size[1] // 2, target_size[0] // 2
                                     ]  #[target_size[1] // 16, target_size[0] // 16]
            })
        self.persistent_sess.run(
            [
                self.tensor_peaks, self.tensor_heatMat_up,
                self.tensor_pafMat_up
            ],
            feed_dict={
                self.tensor_output: [
                    np.ndarray(shape=(target_size[1] // 8,
                                      target_size[0] // 8, 57),
                               dtype=np.float16)
                ],
                self.upsample_size: [target_size[1] // 4, target_size[0] // 4
                                     ]  #[target_size[1] // 32, target_size[0] // 32]
            })
    else:
        self.persistent_sess.run(
            tf.variables_initializer([
                v for v in tf.global_variables()
                if v.name.split(':')[0] in [
                    x.decode('utf-8') for x in self.persistent_sess.run(
                        tf.report_uninitialized_variables())
                ]
            ]))
        self.persistent_sess.run(
            [
                self.tensor_peaks, self.tensor_heatMat_up,
                self.tensor_pafMat_up
            ],
            feed_dict={
                self.tensor_image: [
                    np.ndarray(shape=(target_size[1], target_size[0], 3),
                               dtype=np.float32)
                ],
                self.upsample_size: [target_size[1], target_size[0]]
            })
        self.persistent_sess.run(
            [
                self.tensor_peaks,
                self.tensor_heatMat_up, self.tensor_pafMat_up
            ],
            feed_dict={
                self.tensor_image: [
                    np.ndarray(shape=(target_size[1], target_size[0], 3),
                               dtype=np.float32)
                ],
                self.upsample_size: [target_size[1] // 2, target_size[0] // 2]
            })
        self.persistent_sess.run(
            [
                self.tensor_peaks, self.tensor_heatMat_up,
                self.tensor_pafMat_up
            ],
            feed_dict={
                self.tensor_image: [
                    np.ndarray(shape=(target_size[1], target_size[0], 3),
                               dtype=np.float32)
                ],
                self.upsample_size: [target_size[1] // 4, target_size[0] // 4]
            })
    self.is_mvnc = is_mvnc
def __init__(self, graph_path, target_size=(320, 240), tf_config=None, dataset='Coco', nr_vectmaps=28):
    """Load a frozen pose-estimation graph and build the post-processing ops.

    Args:
        graph_path: path to the frozen ``.pb`` graph file.
        target_size: (width, height) the estimator was exported for.
        tf_config: optional ``tf.ConfigProto`` for the persistent session.
        dataset: 'Coco'/'COCO' (19 keypoints + PAFs in one concat tensor) or
            'MPI' (16 keypoints, separate heat/paf output tensors).
        nr_vectmaps: number of vector-map channels; overridden to 38 for COCO.
    """
    self.target_size = target_size
    self.scaled_img_size = None  # out of use
    # NOTE(review): pad appears to hold four crop/pad offsets fed by the
    # caller (negative => pad with zeros, positive => crop) — confirm order.
    self.pad = tf.placeholder(dtype=tf.int32, shape=(4, ))

    # Load and import the frozen graph, then open a long-lived session on it.
    logger.info('loading graph from %s(default size=%dx%d)' % (graph_path, target_size[0], target_size[1]))
    with tf.gfile.GFile(graph_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    self.graph = tf.get_default_graph()
    tf.import_graph_def(graph_def, name='TfPoseEstimator')
    self.persistent_sess = tf.Session(graph=self.graph, config=tf_config)

    # BUG FIX: the original tested `dataset == 'COCO'` here but `'Coco'`
    # everywhere else, so the default dataset left nr_keypoints unbound
    # (NameError). Accept both spellings consistently.
    is_coco = dataset in ('Coco', 'COCO')
    if is_coco:
        nr_keypoints = 19
        nr_vectmaps = 38
    elif dataset == 'MPI':
        nr_keypoints = 16

    if is_coco:
        # COCO graphs expose a single concat tensor: first nr_keypoints
        # channels are heatmaps, the remainder are PAFs.
        self.tensor_image = self.graph.get_tensor_by_name(
            'TfPoseEstimator/image:0')
        self.tensor_output = self.graph.get_tensor_by_name(
            'TfPoseEstimator/Openpose/concat_stage7:0')
        self.tensor_heatMat = self.tensor_output[:, :, :, :nr_keypoints]
        self.tensor_pafMat = self.tensor_output[:, :, :, nr_keypoints:]
    else:
        # Non-COCO graphs expose separate heat (L2) / paf (L1) outputs; only
        # the input tensor name differs between the graph flavors.
        # NOTE(review): the `elif 'standalone'` arm is unreachable unless the
        # path contains BOTH 'caffe' and 'standalone' — kept as in original.
        if not ('caffe') in graph_path:
            self.tensor_image = self.graph.get_tensor_by_name(
                'TfPoseEstimator/preprocess_divide:0')
            self.tensor_heatMat = self.graph.get_tensor_by_name(
                'TfPoseEstimator/Mconv7_stage6_L2/BiasAdd:0')
            self.tensor_pafMat = self.graph.get_tensor_by_name(
                'TfPoseEstimator/Mconv7_stage6_L1/BiasAdd:0')
        elif 'standalone' in graph_path:
            self.tensor_image = self.graph.get_tensor_by_name(
                'TfPoseEstimator/input:0')
            self.tensor_heatMat = self.graph.get_tensor_by_name(
                'TfPoseEstimator/Mconv7_stage6_L2/BiasAdd:0')
            self.tensor_pafMat = self.graph.get_tensor_by_name(
                'TfPoseEstimator/Mconv7_stage6_L1/BiasAdd:0')
        else:
            self.tensor_image = self.graph.get_tensor_by_name(
                'TfPoseEstimator/Placeholder:0')
            self.tensor_heatMat = self.graph.get_tensor_by_name(
                'TfPoseEstimator/Mconv7_stage6_L2/BiasAdd:0')
            self.tensor_pafMat = self.graph.get_tensor_by_name(
                'TfPoseEstimator/Mconv7_stage6_L1/BiasAdd:0')

    # Caller-chosen (height, width) to resize the network output to.
    self.upsample_size = tf.placeholder(dtype=tf.int32,
                                        shape=(2, ),
                                        name='upsample_size')
    if is_coco:
        self.tensor_heatMat_up = tf.image.resize_area(
            self.tensor_output[:, :, :, :nr_keypoints],
            self.upsample_size,
            align_corners=False,
            name='upsample_heatmat')
        self.tensor_pafMat_up = tf.image.resize_area(
            self.tensor_output[:, :, :, nr_keypoints:],
            self.upsample_size,
            align_corners=False,
            name='upsample_pafmat')
    else:
        # First bring both maps up to the network input resolution.
        self.tensor_heatMat = tf.image.resize_images(
            self.tensor_heatMat,
            tf.shape(self.tensor_image)[1:3],
            method=tf.image.ResizeMethod.BICUBIC)
        self.tensor_pafMat = tf.image.resize_images(
            self.tensor_pafMat,
            tf.shape(self.tensor_image)[1:3],
            method=tf.image.ResizeMethod.BICUBIC)

        # BUG FIX: the original assigned this sum to the name `np`, shadowing
        # the numpy module for the rest of the method. Renamed.
        nr_total_maps = self.tensor_heatMat.shape[-1] + self.tensor_pafMat.shape[-1]

        # Undo/apply the preprocessing padding on each of the four sides.
        # NOTE(review): the pad[0]/pad[2] branches concat along axis=0 (batch)
        # while the zeros are sized along the height dimension, and the inner
        # axis=3 concat adds one channel — this looks inconsistent but is kept
        # byte-for-byte from the original; confirm against the caller before
        # relying on the negative-pad path.
        # if pad[0] < 0
        self.tensor_heatMat = tf.cond(
            self.pad[0] < 0, lambda: tf.concat([
                tf.concat([
                    tf.zeros(
                        (1, -self.pad[0], tf.shape(self.tensor_heatMat)[1],
                         tf.shape(self.tensor_heatMat)[-1])),
                    tf.ones((1, -self.pad[0], tf.shape(self.tensor_heatMat)
                             [1], 1))
                ], axis=3), self.tensor_heatMat
            ], axis=0), lambda: self.tensor_heatMat[:, self.pad[0]:, :, :])
        self.tensor_pafMat = tf.cond(
            self.pad[0] < 0, lambda: tf.concat([
                tf.concat([
                    tf.zeros(
                        (1, -self.pad[0], tf.shape(self.tensor_pafMat)[1],
                         tf.shape(self.tensor_pafMat)[-1])),
                    tf.ones((1, -self.pad[0], tf.shape(self.tensor_pafMat)[
                        1], 1))
                ], axis=3), self.tensor_pafMat
            ], axis=0), lambda: self.tensor_pafMat[:, self.pad[0]:, :, :])

        # if pad[1] < 0
        self.tensor_heatMat = tf.cond(
            self.pad[1] < 0, lambda: tf.concat([
                tf.concat([
                    tf.zeros(
                        (1, tf.shape(self.tensor_heatMat)[0], -self.pad[1],
                         tf.shape(self.tensor_heatMat)[-1])),
                    tf.ones((1, tf.shape(self.tensor_heatMat)[0], -self.
                             pad[1], 1))
                ], axis=3), self.tensor_heatMat
            ], axis=1), lambda: self.tensor_heatMat[:, :, self.pad[1]:, :])
        self.tensor_pafMat = tf.cond(
            self.pad[1] < 0, lambda: tf.concat([
                tf.concat([
                    tf.zeros(
                        (1, tf.shape(self.tensor_pafMat)[0], -self.pad[1],
                         tf.shape(self.tensor_pafMat)[-1])),
                    tf.ones((1, tf.shape(self.tensor_pafMat)[0], -self.pad[
                        1], 1))
                ], axis=3), self.tensor_pafMat
            ], axis=1), lambda: self.tensor_pafMat[:, :, self.pad[1]:, :])

        # if pad[2] < 0
        self.tensor_heatMat = tf.cond(
            self.pad[2] < 0, lambda: tf.concat([
                tf.concat([
                    tf.zeros(
                        (1, -self.pad[2], tf.shape(self.tensor_heatMat)[1],
                         tf.shape(self.tensor_heatMat)[-1])),
                    tf.ones((1, -self.pad[2], tf.shape(self.tensor_heatMat)
                             [1], 1))
                ], axis=3), self.tensor_heatMat
            ], axis=0), lambda: tf.cond(
                tf.equal(self.pad[2], 0),
                lambda: self.tensor_heatMat[:, :, :, :],
                lambda: self.tensor_heatMat[:, :-self.pad[2], :, :]))
        self.tensor_pafMat = tf.cond(
            self.pad[2] < 0, lambda: tf.concat([
                tf.concat([
                    tf.zeros(
                        (1, -self.pad[2], tf.shape(self.tensor_pafMat)[1],
                         tf.shape(self.tensor_pafMat)[-1])),
                    tf.ones((1, -self.pad[2], tf.shape(self.tensor_pafMat)[
                        1], 1))
                ], axis=3), self.tensor_pafMat
            ], axis=0), lambda: tf.cond(
                tf.equal(self.pad[2], 0),
                lambda: self.tensor_pafMat[:, :, :, :],
                lambda: self.tensor_pafMat[:, :-self.pad[2], :, :]))

        # if pad[3] < 0
        self.tensor_heatMat = tf.cond(
            self.pad[3] < 0, lambda: tf.concat([
                tf.concat([
                    tf.zeros(
                        (1, tf.shape(self.tensor_heatMat)[0], -self.pad[3],
                         tf.shape(self.tensor_heatMat)[-1])),
                    tf.ones((1, tf.shape(self.tensor_heatMat)[0], -self.
                             pad[3], 1))
                ], axis=3), self.tensor_heatMat
            ], axis=1), lambda: tf.cond(
                tf.equal(self.pad[3], 0),
                lambda: self.tensor_heatMat[:, :, :, :],
                lambda: self.tensor_heatMat[:, :, :-self.pad[3], :]))
        # BUG FIX: the original predicate tested pad[1] here although every
        # other operand in this cond uses pad[3] (copy-paste from the pad[1]
        # section). Use pad[3] so the paf map is padded/cropped on the same
        # side as the heat map.
        self.tensor_pafMat = tf.cond(
            self.pad[3] < 0, lambda: tf.concat([
                tf.concat([
                    tf.zeros(
                        (1, tf.shape(self.tensor_pafMat)[0], -self.pad[3],
                         tf.shape(self.tensor_pafMat)[-1])),
                    tf.ones((1, tf.shape(self.tensor_pafMat)[0], -self.pad[
                        3], 1))
                ], axis=3), self.tensor_pafMat
            ], axis=1), lambda: tf.cond(
                tf.equal(self.pad[3], 0),
                lambda: self.tensor_pafMat[:, :, :, :],
                lambda: self.tensor_pafMat[:, :, :-self.pad[3], :]))

        # SCALE THE image
        self.tensor_heatMat_up = tf.image.resize_images(
            self.tensor_heatMat[:, :, :, :],
            self.upsample_size,
            method=tf.image.ResizeMethod.BICUBIC)
        self.tensor_pafMat_up = tf.image.resize_images(
            self.tensor_pafMat[:, :, :, :],
            self.upsample_size,
            method=tf.image.ResizeMethod.BICUBIC)

    # Placeholder used to re-run the smoother/NMS on an externally scaled map.
    self.tensor_heatMat_up_scale = tf.placeholder(
        dtype=tf.float32, shape=self.tensor_heatMat_up.get_shape())

    # Gaussian-smooth the upsampled heatmaps, then keep only local maxima
    # (NMS via 3x3 max-pool and an equality mask).
    smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()
    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat,
                                      window_shape=(3, 3),
                                      pooling_type='MAX',
                                      padding='SAME')
    self.tensor_peaks = tf.where(
        tf.equal(gaussian_heatMat, max_pooled_in_tensor), gaussian_heatMat,
        tf.zeros_like(gaussian_heatMat))

    # Same smoothing/NMS pipeline fed from the external-scale placeholder.
    smoother_final = Smoother({'data': self.tensor_heatMat_up_scale}, 25, 3.0)
    gaussian_heatMat_final = smoother_final.get_output()
    max_pooled_in_tensor_final = tf.nn.pool(gaussian_heatMat_final,
                                            window_shape=(3, 3),
                                            pooling_type='MAX',
                                            padding='SAME')
    self.tensor_peaks_final = tf.where(
        tf.equal(gaussian_heatMat_final, max_pooled_in_tensor_final),
        gaussian_heatMat_final, tf.zeros_like(gaussian_heatMat_final))

    self.heatMat = self.pafMat = None

    # warm-up: initialize only the variables the session reports as
    # uninitialized (e.g. the Smoother's Gaussian kernel).
    self.persistent_sess.run(
        tf.variables_initializer([
            v for v in tf.global_variables()
            if v.name.split(':')[0] in [
                x.decode('utf-8') for x in self.persistent_sess.run(
                    tf.report_uninitialized_variables())
            ]
        ]))
def __init__(self, graph_path, target_size=(320, 240), tf_config=None):
    """Restore a frozen OpenPose graph and build the post-processing ops.

    Args:
        graph_path: path to the frozen ``.pb`` graph file.
        target_size: (width, height) used for the warm-up runs.
        tf_config: optional ``tf.ConfigProto`` for the persistent session.
    """
    self.target_size = target_size

    # Deserialize the frozen graph and import it under a fixed name scope.
    logger.info('loading graph from %s(default size=%dx%d)' % (graph_path, target_size[0], target_size[1]))
    with tf.gfile.GFile(graph_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    self.graph = tf.get_default_graph()
    tf.import_graph_def(graph_def, name='TfPoseEstimator')
    self.persistent_sess = tf.Session(graph=self.graph, config=tf_config)
    # for op in self.graph.get_operations():
    #     print(op.name)
    # for ts in [n.name for n in tf.get_default_graph().as_graph_def().node]:
    #     print(ts)

    # Network input and output tensors. Output layout is [batch, height,
    # width, channels]: channels 0-18 are keypoint heatmaps, the rest PAFs.
    self.tensor_image = self.graph.get_tensor_by_name(
        'TfPoseEstimator/image:0')
    self.tensor_output = self.graph.get_tensor_by_name(
        'TfPoseEstimator/Openpose/concat_stage7:0')
    self.tensor_heatMat = self.tensor_output[:, :, :, :19]
    self.tensor_pafMat = self.tensor_output[:, :, :, 19:]

    # Caller-chosen (height, width) to resize the network output to.
    self.upsample_size = tf.placeholder(dtype=tf.int32,
                                        shape=(2, ),
                                        name='upsample_size')
    self.tensor_heatMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, :19],
        self.upsample_size,
        align_corners=False,
        name='upsample_heatmat')
    self.tensor_pafMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, 19:],
        self.upsample_size,
        align_corners=False,
        name='upsample_pafmat')

    # Gaussian-smooth the upsampled heatmaps ...
    smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()
    # ... then suppress non-maxima: a pixel survives only if it equals the
    # 3x3 max-pool of its neighborhood, everything else becomes zero
    # (tf.where picks from the second arg where the mask is True, from the
    # third where it is False).
    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat,
                                      window_shape=(3, 3),
                                      pooling_type='MAX',
                                      padding='SAME')
    self.tensor_peaks = tf.where(
        tf.equal(gaussian_heatMat, max_pooled_in_tensor), gaussian_heatMat,
        tf.zeros_like(gaussian_heatMat))

    self.heatMat = self.pafMat = None

    # warm-up: initialize only the variables the session reports as
    # uninitialized (e.g. the Smoother's Gaussian kernel).
    pending = [
        raw.decode('utf-8') for raw in self.persistent_sess.run(
            tf.report_uninitialized_variables())
    ]
    self.persistent_sess.run(
        tf.variables_initializer(
            [v for v in tf.global_variables()
             if v.name.split(':')[0] in pending]))

    # Prime the graph at full, half and quarter upsample resolution so the
    # first real inference does not pay the one-off setup cost.
    dummy_frame = np.ndarray(shape=(target_size[1], target_size[0], 3),
                             dtype=np.float32)
    for divisor in (1, 2, 4):
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [dummy_frame],
                self.upsample_size:
                    [target_size[1] // divisor, target_size[0] // divisor]
            })

    # logs
    if self.tensor_image.dtype == tf.quint8:
        logger.info('quantization mode enabled.')
def __init__(self, graph_path, target_size=(320, 240), tf_config=None):
    """Restore a frozen graph, optimize it with TensorRT, and build the
    heatmap/PAF post-processing ops.

    Args:
        graph_path: path to the frozen ``.pb`` graph file.
        target_size: (width, height) used for the warm-up runs.
        tf_config: optional ``tf.ConfigProto`` for the persistent session.
    """
    self.target_size = target_size

    # load graph
    logger.info('loading graph from %s(default size=%dx%d)' % (graph_path, target_size[0], target_size[1]))
    with tf.gfile.GFile(graph_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    self.graph = tf.get_default_graph()
    # Convert the graph with TF-TRT: FP16 precision, batch size 1, 32 MB
    # workspace, and only segments of at least 50 nodes handed to TensorRT.
    trt_graph = trt.create_inference_graph(
        input_graph_def=graph_def,
        outputs=["Openpose/concat_stage7"],
        max_batch_size=1,
        max_workspace_size_bytes=1 << 25,
        precision_mode='FP16',
        minimum_segment_size=50)
    print(trt_graph)
    tf.import_graph_def(trt_graph, name='TfPoseEstimator')
    self.persistent_sess = tf.Session(graph=self.graph, config=tf_config)

    # Dump the (converted) graph so it can be inspected in TensorBoard.
    writer = tf.summary.FileWriter('/tmp/estimator',
                                   self.persistent_sess.graph)
    writer.flush()

    # for op in self.graph.get_operations():
    #     print(op.name)
    # for ts in [n.name for n in tf.get_default_graph().as_graph_def().node]:
    #     print(ts)

    # Network input / output tensors; channels 0-18 of the output are the
    # keypoint heatmaps, the remaining channels the PAFs.
    self.tensor_image = self.graph.get_tensor_by_name(
        'TfPoseEstimator/image:0')
    self.tensor_output = self.graph.get_tensor_by_name(
        'TfPoseEstimator/Openpose/concat_stage7:0')
    self.tensor_heatMat = self.tensor_output[:, :, :, :19]
    self.tensor_pafMat = self.tensor_output[:, :, :, 19:]

    # Caller-chosen (height, width) to resize the network output to.
    self.upsample_size = tf.placeholder(dtype=tf.int32,
                                        shape=(2, ),
                                        name='upsample_size')
    self.tensor_heatMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, :19],
        self.upsample_size,
        align_corners=False,
        name='upsample_heatmat')
    self.tensor_pafMat_up = tf.image.resize_area(
        self.tensor_output[:, :, :, 19:],
        self.upsample_size,
        align_corners=False,
        name='upsample_pafmat')

    # Gaussian-smooth the heatmaps, then keep only local maxima (NMS via a
    # 3x3 max-pool compared for equality against the smoothed map).
    smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()
    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat,
                                      window_shape=(3, 3),
                                      pooling_type='MAX',
                                      padding='SAME')
    self.tensor_peaks = tf.where(
        tf.equal(gaussian_heatMat, max_pooled_in_tensor), gaussian_heatMat,
        tf.zeros_like(gaussian_heatMat))

    self.heatMat = self.pafMat = None

    # warm-up: initialize only the variables the session reports as
    # uninitialized (e.g. the Smoother's Gaussian kernel).
    not_yet_initialized = [
        raw.decode('utf-8') for raw in self.persistent_sess.run(
            tf.report_uninitialized_variables())
    ]
    self.persistent_sess.run(
        tf.variables_initializer([
            v for v in tf.global_variables()
            if v.name.split(':')[0] in not_yet_initialized
        ]))

    # Prime the pipeline at full, half and quarter upsample resolution so
    # the first real frame does not pay TensorRT/session setup costs.
    blank_frame = np.ndarray(shape=(target_size[1], target_size[0], 3),
                             dtype=np.float32)
    for divisor in (1, 2, 4):
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [blank_frame],
                self.upsample_size:
                    [target_size[1] // divisor, target_size[0] // divisor]
            })