Example #1
def getNumberOfPerson(img, place):
    """Count people in an image and return a summary dict for the given place."""

    input_video = img

    detection_graph, category_index = backbone.set_model('ssd_mobilenet_v2_coco_2018_03_29', 'mscoco_label_map.pbtxt')

    is_color_recognition_enabled = 0

    result = object_counting_api.single_image_object_counting(input_video, detection_graph, category_index, is_color_recognition_enabled)  # targeted objects counting
    
    cv2.destroyAllWindows()
    info_dict = {}
    try:
        # The counting result is a string; take the digit that follows the
        # 'person' label (this assumes a single-digit count).
        idx = result.find('person')
        info = result[idx:idx+11]

        complexity = cal_complexity(int(info[-1]))

        info_dict['person'] = int(info[-1])
        info_dict['place'] = place
        info_dict['complexity'] = complexity
        info_dict['time'] = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')
    except Exception:
        # Parsing failed (e.g. no person was detected), so return a dict of Nones.
        info_dict['person'] = None
        info_dict['place'] = None
        info_dict['complexity'] = None
        info_dict['time'] = None
    
    return info_dict
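A minimal usage sketch for the function above (the image path and place label here are purely illustrative, and cal_complexity, backbone and object_counting_api are assumed to be imported elsewhere in the project):

info = getNumberOfPerson("./input_images_and_videos/sample_input_image.jpg", "entrance")
# On success the dict contains 'person', 'place', 'complexity' and 'time';
# if parsing fails, every value is None.
print(info)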
def count(image="ParkingLot.jpg"):
    input_video = image

    detection_graph, category_index = backbone.set_model(
        'ssd_mobilenet_v1_coco_2018_01_28', 'mscoco_label_map.pbtxt')

    is_color_recognition_enabled = 0

    result = object_counting_api.single_image_object_counting(
        input_video, detection_graph, category_index,
        is_color_recognition_enabled)  # targeted objects counting

    print(result)
    return result
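Both functions above slice the raw result string to extract the person count, which breaks as soon as the count has more than one digit. A more robust parsing sketch, assuming the result string contains the class name followed by its count (as Example #1's slicing implies); the helper name is hypothetical:

import re

def extract_count(result, target='person'):
    # Search for the target label followed by a number anywhere in the
    # string returned by single_image_object_counting.
    match = re.search(r'{}\D*(\d+)'.format(re.escape(target)), result)
    return int(match.group(1)) if match else None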
Example #3
def count(image):
  input_image = image
  detection_graph = tf.Graph()
  with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
      serialized_graph = fid.read()
      od_graph_def.ParseFromString(serialized_graph)
      tf.import_graph_def(od_graph_def, name='')

  label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
  categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
  category_index = label_map_util.create_category_index(categories)


  fps = 30 # change this to your input video's fps
  width = 626 # change this to your input video's width
  height = 360 # change this to your input video's height
  is_color_recognition_enabled = 0

  result = object_counting_api.single_image_object_counting(input_image, detection_graph, category_index, is_color_recognition_enabled, fps, width, height) # targeted objects counting

  return result
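Example #3 loads the frozen graph with TensorFlow 1.x APIs (tf.GraphDef, tf.gfile). If the surrounding project runs under TensorFlow 2.x, the same loading step would need the compat layer; a minimal sketch, assuming PATH_TO_FROZEN_GRAPH points to the same frozen .pb file as above:

import tensorflow as tf

detection_graph = tf.Graph()
with detection_graph.as_default():
    # TF2-compatible equivalents of the TF1 calls used in Example #3.
    od_graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        od_graph_def.ParseFromString(fid.read())
        tf.compat.v1.import_graph_def(od_graph_def, name='')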
#----------------------------------------------
#--- Author         : Ahmet Ozlu
#--- Mail           : [email protected]
#--- Date           : 27th January 2018
#----------------------------------------------

# Imports
import tensorflow as tf

# Object detection imports
from utils import backbone
from api import object_counting_api

input_video = "./input_images_and_videos/sample_input_image.jpg"

# By default I use an "SSD with Mobilenet" model here. See the detection model zoo (https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
detection_graph, category_index = backbone.set_model(
    'ssd_mobilenet_v1_coco_2018_01_28', 'mscoco_label_map.pbtxt')

is_color_recognition_enabled = False  # set to True to enable color prediction for the detected objects

result = object_counting_api.single_image_object_counting(
    input_video, detection_graph, category_index,
    is_color_recognition_enabled)  # targeted objects counting

print(result)