Example 1
import cv2

from utils import backbone
from api import object_counting_api


def process(input_video):
    vcap = cv2.VideoCapture(input_video)

    if not vcap.isOpened():
        raise IOError("Could not open video source: %s" % input_video)

    # Read the capture properties via named constants instead of the
    # magic indices 3, 4 and 5.
    width = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Note: CAP_PROP_FPS is reported as 0.0 by some codecs/containers.
    fps = int(vcap.get(cv2.CAP_PROP_FPS))

    detection_graph, category_index = backbone.set_model('peixe_v2_coco_t2')

    #object_counting_api.object_counting(input_video, detection_graph, category_index, 0) # count all objects, color prediction disabled

    #object_counting_api.object_counting(input_video, detection_graph, category_index, 1) # count all objects, color prediction enabled

    targeted_objects = "peixe"  # replace with the objects you want to count

    is_color_recognition_enabled = 0

    object_counting_api.targeted_object_counting(
        input_video, detection_graph, category_index,
        is_color_recognition_enabled, targeted_objects, fps, width,
        height)  # targeted objects counting
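As the comment in the code notes, CAP_PROP_FPS can legitimately come back as 0.0 for some codecs and capture backends. A minimal sketch of a fallback, assuming a default of 30 fps is acceptable for the pipeline (the helper name and default are illustrative, not part of the counting API):

import cv2


def get_fps(vcap, default_fps=30):
    # Some containers/backends report 0.0 for CAP_PROP_FPS;
    # fall back to an assumed default in that case.
    fps = vcap.get(cv2.CAP_PROP_FPS)
    return int(fps) if fps > 0 else default_fps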
Example 2
def call_tensor():
    import tensorflow as tf
    import cv2

    from utils import backbone
    from api import object_counting_api
    from carton.cart import Cart

    input_video = 1  # device index: open the second attached camera
    detection_graph, category_index = backbone.set_model(
        'faster_rcnn_inception_v2_coco_2018_01_28')

    targeted_objects = "person"  # replace with the objects you want to count
    fps = 24  # replace with your input video's fps
    width = 854  # replace with your input video's width
    height = 480  # replace with your input video's height
    is_color_recognition_enabled = 0

    object_counting_api.targeted_object_counting(
        input_video, detection_graph, category_index,
        is_color_recognition_enabled, targeted_objects, fps, width,
        height)  # targeted objects counting
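Because input_video = 1 here is a camera index rather than a file path, a quick sanity check that OpenCV can open the device avoids a confusing failure deeper in the pipeline. A sketch using only standard OpenCV calls:

import cv2


def camera_available(index):
    # VideoCapture(index) opens an attached camera; isOpened() reports
    # whether the device could actually be acquired.
    vcap = cv2.VideoCapture(index)
    ok = vcap.isOpened()
    vcap.release()
    return ok


if not camera_available(1):
    raise IOError('Camera index 1 could not be opened')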
Example 3
import tensorflow as tf

# Object detection imports
from utils import backbone
from api import object_counting_api

# A plain string comparison misorders versions such as '1.10.0',
# so compare parsed version components instead.
if tuple(int(v) for v in tf.__version__.split('.')[:2]) < (1, 4):
    raise ImportError(
        'Please upgrade your tensorflow installation to v1.4.* or later!')

input_video = "New Office TOUR!  Karlie Kloss.mp4"

# By default I use an "SSD with Mobilenet" model here. See the detection model zoo (https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
detection_graph, category_index = backbone.set_model(
    'ssd_mobilenet_v1_coco_2017_11_17')

#object_counting_api.object_counting(input_video, detection_graph, category_index, 0) # count all objects, color prediction disabled

#object_counting_api.object_counting(input_video, detection_graph, category_index, 1) # count all objects, color prediction enabled

targeted_objects = "person"  # replace with the objects you want to count
fps = 24  # replace with your input video's fps
width = 854  # replace with your input video's width
height = 480  # replace with your input video's height
is_color_recognition_enabled = 0

object_counting_api.targeted_object_counting(
    input_video, detection_graph, category_index, is_color_recognition_enabled,
    targeted_objects, fps, width, height)  # targeted objects counting

#object_counting_api.object_counting(input_video, detection_graph, category_index, is_color_recognition_enabled, fps, width, height) # counting all the objects
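Rather than hardcoding fps, width, and height, the same values can be read from the input file with OpenCV, as Example 1 does. A short sketch using the standard property constants:

import cv2

vcap = cv2.VideoCapture(input_video)
if vcap.isOpened():
    width = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))   # e.g. 854
    height = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT)) # e.g. 480
    fps = int(vcap.get(cv2.CAP_PROP_FPS))             # may be 0 for some codecs
vcap.release()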
Example 4
import tensorflow as tf

# Object detection imports
from utils import backbone
from api import object_counting_api

input_video = "./input_images_and_videos/demo (6).mp4"

detection_graph, category_index = backbone.set_model('custom_trained_inference_graph', 'label_map.pbtxt')

targeted_objects = "leaf, pill_pack, plastic"  # comma-separated custom classes
is_color_recognition_enabled = 0

object_counting_api.targeted_object_counting(input_video, detection_graph, category_index, is_color_recognition_enabled, targeted_objects)  # targeted objects counting
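The targeted_objects string above must use the exact class names from label_map.pbtxt. Since the TensorFlow Object Detection API's category_index is a dict mapping class id to {'id': ..., 'name': ...}, a small check can catch typos before inference starts; this helper is a hypothetical addition, not part of object_counting_api:

def check_targets(targeted_objects, category_index):
    # category_index: {class_id: {'id': class_id, 'name': class_name}, ...}
    known = {item['name'] for item in category_index.values()}
    wanted = [name.strip() for name in targeted_objects.split(',')]
    missing = [name for name in wanted if name not in known]
    if missing:
        raise ValueError('Unknown target classes: %s' % ', '.join(missing))


check_targets(targeted_objects, category_index)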
Example 5
import argparse

from utils import backbone
from api import object_counting_api

INPUT_STREAM = "./MVI_6835.mp4"
CPU_EXTENSION = "/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64/libcpu_extension_sse4.so"
FILE_OUTPUT = "/content/output.mp4"

# By default I use an "SSD with Mobilenet" model here. See the detection model zoo (https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
detection_graph, category_index = backbone.set_model(
    'ssd_mobilenet_v1_coco_2018_01_28', 'mscoco_label_map.pbtxt')

targeted_objects = "person, bicycle, bus, car, motorcycle, airplane, train, truck, boat, traffic light, fire hydrant, stop sign, parking meter, book, cell phone, laptop, wine glass, bottle, handbag, cat, dog, bird"  # replace with the objects you want to count
is_color_recognition_enabled = 0

object_counting_api.targeted_object_counting(
    INPUT_STREAM, detection_graph, category_index,
    is_color_recognition_enabled,
    targeted_objects)  # targeted objects counting


def get_args():
    '''
    Gets the arguments from the command line.
    '''
    parser = argparse.ArgumentParser("Run inference on an input video")
    # -- Create the descriptions for the commands
    m_desc = "The location of the model XML file"
    i_desc = "The location of the input file"
    d_desc = "The device name, if not 'CPU'"
    t_desc = "The confidence threshold used to draw bounding boxes"
    c_desc = "The color of the bounding boxes, e.g. 'RED', 'GREEN' or 'BLUE'"

    # -- Add the arguments (the -t and -c flag names are one reasonable
    #    choice for the threshold and color options described above)
    parser.add_argument("-m", help=m_desc, required=True)
    parser.add_argument("-i", help=i_desc, required=True)
    parser.add_argument("-d", help=d_desc, default='CPU')
    parser.add_argument("-t", help=t_desc, type=float, default=0.5)
    parser.add_argument("-c", help=c_desc, default='GREEN')

    return parser.parse_args()
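For completeness, one way to wire get_args() into a script entry point; main() here is a hypothetical wrapper, and the attribute names follow the flags defined above:

def main():
    args = get_args()
    print('Model: %s, input: %s, device: %s' % (args.m, args.i, args.d))
    print('Confidence threshold: %.2f, box color: %s' % (args.t, args.c))


if __name__ == '__main__':
    main()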