Example #1
import numpy as np
import tensorflow as tf  # TF 1.x style API (tf.Session, tf.get_default_graph)

import util  # local helper module providing get_frames_paths / data_preprocessing


def run_inference_for_images_per_image(graph, image_folder, np_boxes_path,
                                       score_threshold):
    frame_lists = util.get_frames_paths(image_folder, gap=2)
    with graph.as_default():
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        tensor_dict = {}
        for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes'
        ]:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                    tensor_name)

        image_tensor = tf.get_default_graph().get_tensor_by_name(
            'image_tensor:0')

        with tf.Session() as sess:
            path_box_lists = []
            for i, frame_path in enumerate(frame_lists):
                image = util.data_preprocessing(frame_path, target_size=640)
                image = np.expand_dims(image, axis=0)

                # Run inference
                output_dict = sess.run(tensor_dict,
                                       feed_dict={image_tensor: image})

                # all outputs are float32 numpy arrays, so convert types as appropriate
                output_dict['num_detections'] = int(
                    output_dict['num_detections'][0])
                output_dict['detection_classes'] = output_dict[
                    'detection_classes'][0].astype(np.int8)
                output_dict['detection_boxes'] = output_dict[
                    'detection_boxes'][0]
                output_dict['detection_scores'] = output_dict[
                    'detection_scores'][0]
                # print(output_dict)
                for score, box, _class in zip(
                        output_dict['detection_scores'],
                        output_dict['detection_boxes'],
                        output_dict['detection_classes']):
                    if score >= score_threshold:
                        path_box_lists.append([
                            frame_path, box[0], box[1], box[2], box[3], _class
                        ])
                # Report progress per processed frame.
                print('processed frame {} / {}'.format(i + 1, len(frame_lists)))

            # The "with" block closes the session; no explicit sess.close() needed.
            np.save(np_boxes_path, path_box_lists)
            print('finish boxes detection!')
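
Example #1 receives an already-loaded frozen graph; the load_frozen_graph helper used in the __main__ blocks further down this page is not shown here. Below is a minimal sketch of such a loader using the standard TF 1.x frozen-graph API (the parameter name frozen_graph_path is an assumption):

import tensorflow as tf


def load_frozen_graph(frozen_graph_path):
    # Sketch of a frozen-graph loader; the argument name is an assumption.
    graph = tf.Graph()
    with graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(frozen_graph_path, 'rb') as fid:
            graph_def.ParseFromString(fid.read())
        # Import into the fresh graph without a name-scope prefix so tensors
        # keep names such as 'image_tensor:0' and 'detection_boxes:0'.
        tf.import_graph_def(graph_def, name='')
    return graph

With that loader, Example #1 can be driven as graph = load_frozen_graph('frozen_inference_graph.pb') followed by run_inference_for_images_per_image(graph, image_folder, 'boxes.npy', 0.5).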
Example #2
def run_inference_get_feature(graph, image_folder):
    frame_lists = util.get_frames_paths(image_folder, gap=2)

    with graph.as_default():
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        tensor_dict = {}
        for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
        ]:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                    tensor_name)

        image_tensor = tf.get_default_graph().get_tensor_by_name(
            'image_tensor:0')
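
        # The original Example #2 snippet ends here. Below is a hedged sketch of
        # one way the function could continue, mirroring the session loop from
        # Example #1 (util.data_preprocessing and target_size=640 are carried
        # over from that example as assumptions); it simply collects the raw
        # detection outputs per frame rather than any intermediate features.
        with tf.Session() as sess:
            per_frame_outputs = []
            for frame_path in frame_lists:
                image = util.data_preprocessing(frame_path, target_size=640)
                image = np.expand_dims(image, axis=0)
                # Run all requested detection tensors for this frame.
                output_dict = sess.run(tensor_dict,
                                       feed_dict={image_tensor: image})
                per_frame_outputs.append(output_dict)
            return per_frame_outputs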
Example #3
            # return output_dict
            print("output_dict['detection_boxes'] shape is {}".format(
                output_dict['detection_boxes'].shape))
            print("output_dict['detection_scores'] shape is {}".format(
                output_dict['detection_scores'].shape))

            category_index = label_map_util.create_category_index_from_labelmap(
                PATH_TO_LABELS, use_display_name=True)

            image = vis_util.visualize_boxes_and_labels_on_image_array(
                image,
                output_dict['detection_boxes'],
                output_dict['detection_classes'],
                output_dict['detection_scores'],
                category_index,
                instance_masks=output_dict.get('detection_masks'),
                use_normalized_coordinates=True,
                line_thickness=3,
                min_score_thresh=0.3)

            plt.imsave(output_image_path, image)

            sess.close()


if __name__ == '__main__':
    args = arg_parse()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    np_paths_boxes_path = args.box_imgs_npy_path
    # print(image_dataset_path)
    graph = load_frozen_graph(args.forzen_graph)
    frame_lists = util.get_frames_paths(args.dataset_folder, gap=2)
    # vis_detection_result(graph,frame_lists[20],'/home/'+args.machine+'/vis_result.jpg')
    run_inference_for_images_per_image(graph, args.dataset_folder,
                                       np_paths_boxes_path, 0.5)
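
Example #3 relies on helpers from the TensorFlow Object Detection API and matplotlib that the fragment does not import. A sketch of the imports its names correspond to (PATH_TO_LABELS and output_image_path are defined elsewhere in the original file and are left as-is):

import os

import matplotlib.pyplot as plt

from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util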
Example #4
    frame_lists = util.get_frames_paths(image_folder, gap=2)

    with graph.as_default():
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        tensor_dict = {}
        for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
        ]:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                    tensor_name)

        image_tensor = tf.get_default_graph().get_tensor_by_name(
            'image_tensor:0')

    #     with tf.Session() as sess:
    #
    # pass

if __name__ == '__main__':
    args = arg_parse()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    np_paths_boxes_path = '/home/' + args.machine + '/' + args.dataset + '_img_path_box.npy'
    image_dataset_path = prefix + args.dataset + '/training/frames/'
    # print(image_dataset_path)
    graph = load_frozen_graph()
    frame_lists = util.get_frames_paths(image_dataset_path, gap=2)
    # vis_detection_result(graph,frame_lists[20],'/home/'+args.machine+'/vis_result.jpg')
    run_inference_for_images_per_image(graph, image_dataset_path,
                                       np_paths_boxes_path, 0.5)
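
Both __main__ blocks call arg_parse(), which is not shown in these fragments. A minimal argparse sketch covering only the attributes the snippets access (flag names mirror those attribute names, including the "forzen_graph" spelling; all defaults are assumptions):

import argparse


def arg_parse():
    # Sketch of the unseen arg_parse(); flags mirror the args.* attributes used above.
    parser = argparse.ArgumentParser(
        description='Frozen-graph detection inference')
    parser.add_argument('--gpu', default='0',
                        help='value assigned to CUDA_VISIBLE_DEVICES')
    parser.add_argument('--machine', default='user',
                        help='home-directory name used when building output paths')
    parser.add_argument('--dataset', default='dataset',
                        help='dataset name used in output file names')
    parser.add_argument('--dataset_folder', default='frames/',
                        help='folder containing the extracted frames')
    parser.add_argument('--box_imgs_npy_path', default='img_path_box.npy',
                        help='where to save the [frame_path, box, class] array')
    parser.add_argument('--forzen_graph', default='frozen_inference_graph.pb',
                        help='path to the frozen .pb inference graph')
    return parser.parse_args()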