Example #1
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    print("I found these objects in " + " ( %.2f ms ):" %
          (numpy.sum(inference_time)))
    flag = 0

    for i in range(0, output_dict['num_detections']):
        print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
              labels[int(output_dict['detection_classes_' + str(i)])] +
              ": Top Left: " +
              str(output_dict['detection_boxes_' + str(i)][0]) +
              " Bottom Right: " +
              str(output_dict['detection_boxes_' + str(i)][1]))

        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
        if labels[output_dict.get('detection_classes_' +
                                  str(i))] == '5: bottle':
            flag = 1
            xdata1 = x1
            xdata2 = x2

        # Prep string to overlay on the image
        display_str = (labels[output_dict.get('detection_classes_' + str(i))] +
                       ": " +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        frame = visualize_output.draw_bounding_box(y1,
                                                   x1,
                                                   y2,
                                                   x2,
                                                   frame,
                                                   thickness=4,
                                                   color=(255, 255, 0),
                                                   display_str=display_str)
    print('\n')

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
    # Return the bottle's horizontal offset, or -800 if no bottle was
    # found (xdata1/xdata2 are only assigned when flag is set)
    if flag == 1:
        return int((xdata1 + xdata2) / 2) - 288
    else:
        return -800
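All of these infer_image() variants assume a caller that has already opened an NCS device, allocated the compiled graph, and resized each frame to the network's input size. A minimal harness sketch using the NCSDK v1 API is shown below; the graph path, 300x300 input size, and mean/scale values are assumptions typical of SSD MobileNet, not taken from any one project.

import cv2
import numpy
from mvnc import mvncapi as mvnc

GRAPH_PATH = 'graphs/ssd_mobilenet.graph'   # hypothetical path
INPUT_DIM = (300, 300)                      # SSD MobileNet input (assumed)
MEAN, SCALE = 127.5, 0.007843               # common SSD scaling (assumed)

# Open the first enumerated NCS device
devices = mvnc.EnumerateDevices()
if len(devices) == 0:
    raise RuntimeError('No NCS device found')
device = mvnc.Device(devices[0])
device.OpenDevice()

# Allocate the compiled graph on the device
with open(GRAPH_PATH, mode='rb') as f:
    graph = device.AllocateGraph(f.read())

# Grab frames, preprocess to fp16, and run inference
camera = cv2.VideoCapture(0)
while True:
    ret, frame = camera.read()
    if not ret:
        break
    img = cv2.resize(frame, INPUT_DIM)
    img = (img.astype(numpy.float16) - MEAN) * SCALE
    infer_image(graph, img, frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the camera and the NCS resources
camera.release()
graph.DeallocateGraph()
device.CloseDevice()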
Example #2
def infer_image_v2( graph, img_draw, img):

    # The first inference takes an additional ~20ms due to memory 
    # initializations, so we make a 'dummy forward pass'.
    graph.LoadTensor( img, 'user object' )
    output, userobj = graph.GetResult()

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    if ARGS['network'] == 'SSD':
        output_dict = deserialize_output.ssd( output, CONFIDANCE_THRESHOLD, img_draw.shape )
    elif ARGS['network'] == 'TinyYolo':
        output_dict = deserialize_output.tinyyolo( output, CONFIDANCE_THRESHOLD, img_draw.shape )
        
    tab = []

    for i in range( 0, output_dict['num_detections'] ):
        tabbis = []
        tabbis.append( '{}'.format(output_dict['detection_scores_' + str(i)]) )
        tabbis.append( labels[ int(output_dict['detection_classes_' + str(i)]) ] )
        tabbis.append( output_dict['detection_boxes_' + str(i)][0] )
        tabbis.append( output_dict['detection_boxes_' + str(i)][1] )
        tab.append(tabbis)

        # Draw bounding boxes around valid detections 
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

        img_draw = visualize_output.draw_bounding_box( 
                       y1, x1, y2, x2, 
                       img_draw,
                       thickness=4,
                       color=(255, 255, 0),
                       display_str=display_str )

    print( "==============================================================\n" )

    if not os.path.exists('pictures'):
        os.makedirs('pictures')

    skimage.io.imshow( img_draw )
    filename = 'pictures/{}.jpg'.format(str(round(time.time() * 1000)))
    skimage.io.imsave(filename, img_draw)
        
    return tab
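infer_image_v2() expects img to already match the chosen network's input geometry. A preprocessing sketch consistent with the ARGS['network'] switch above, assuming the usual NCS app zoo input sizes (SSD MobileNet: 300x300, Tiny YOLO: 448x448) and scaling:

import numpy
import skimage.io
import skimage.transform

def pre_process_image(args):
    # Read the source image and resize/scale it for the chosen network
    img = skimage.io.imread(args['image'])
    if args['network'] == 'SSD':
        img = skimage.transform.resize(img, (300, 300), preserve_range=True)
        img = (img - 127.5) * 0.007843    # mean-center and scale (assumed)
    elif args['network'] == 'TinyYolo':
        img = skimage.transform.resize(img, (448, 448), preserve_range=True)
        img = img / 255.0                 # scale to [0, 1] (assumed)
    return img.astype(numpy.float16)      # NCS graphs take fp16 tensors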
Example #3
def infer_image(graph, img, frame, fps):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if (output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON):

            #cur_time = strftime( "%Y_%m_%d_%H_%M_%S", localtime() )
            #print( "Person detected on " + cur_time )

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)

            # Capture snapshots
            #img = Image.fromarray( frame )
            #photo = ( os.path.dirname(os.path.realpath(__file__))
            #          + "/captures/photo_"
            #          + cur_time + ".jpg" )
            #img.save( photo )

    # If a display is available, show the image on which inference was performed
    #if 'DISPLAY' in os.environ:
    #    img.show()

    frame = visualize_output.draw_fps(frame, fps)
    img = Image.fromarray(frame)
    imgbytearr = io.BytesIO()
    img.save(imgbytearr, format='jpeg')
    mqttframe = imgbytearr.getvalue()
    client.publish(MQTT_TOPIC, mqttframe, 0, False)
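This variant streams annotated JPEG frames over MQTT instead of displaying them; client and MQTT_TOPIC are globals defined elsewhere in the project. A minimal setup sketch using paho-mqtt, with a hypothetical broker host and topic name:

import paho.mqtt.client as mqtt

MQTT_BROKER = 'localhost'     # hypothetical broker address
MQTT_TOPIC = 'ncs/frames'     # hypothetical topic name

# Connect once at startup; infer_image() then publishes each JPEG frame
client = mqtt.Client()
client.connect(MQTT_BROKER, 1883, keepalive=60)
client.loop_start()           # background thread services the connection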
Example #4
def infer_image( graph, img ):

    # Read original image, so we can perform visualization ops on it
    img_draw = skimage.io.imread( ARGS['image'] )

    # The first inference takes an additional ~20ms due to memory 
    # initializations, so we make a 'dummy forward pass'.
    graph.LoadTensor( img, 'user object' )
    output, userobj = graph.GetResult()

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    if ARGS['network'] == 'SSD':
        output_dict = deserialize_output.ssd( output, CONFIDANCE_THRESHOLD, img_draw.shape )
    elif ARGS['network'] == 'TinyYolo':
        output_dict = deserialize_output.tinyyolo( output, CONFIDANCE_THRESHOLD, img_draw.shape )

    # Print the results
    print( "\n==============================================================" )
    print( "I found these objects in", ntpath.basename( ARGS['image'] ) )
    print( "Execution time: " + str( np.sum( inference_time ) ) + "ms" )
    print( "--------------------------------------------------------------" )
    for i in range( 0, output_dict['num_detections'] ):
        print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)]
               + labels[ int(output_dict['detection_classes_' + str(i)]) ]
               + ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
               + " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )

        # Draw bounding boxes around valid detections 
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

        img_draw = visualize_output.draw_bounding_box( 
                       y1, x1, y2, x2, 
                       img_draw,
                       thickness=4,
                       color=(255, 255, 0),
                       display_str=display_str )

    print( "==============================================================\n" )

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        skimage.io.imshow( img_draw )
        skimage.io.show()
Example #5
def infer_image( graph, img ):

    # Read original image, so we can perform visualization ops on it
    img_draw = skimage.io.imread( ARGS.image )

    # The first inference takes an additional ~20ms due to memory 
    # initializations, so we make a 'dummy forward pass'.
    graph.LoadTensor( img, 'user object' )
    output, userobj = graph.GetResult()

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd( 
                      output, 
                      CONFIDANCE_THRESHOLD, 
                      img_draw.shape )

    # Print the results
    print( "\n==============================================================" )
    print( "I found these objects in", ntpath.basename( ARGS.image ) )
    print( "Execution time: " + str( numpy.sum( inference_time ) ) + "ms" )
    print( "--------------------------------------------------------------" )
    for i in range( 0, output_dict['num_detections'] ):
        print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)]
               + labels[ int(output_dict['detection_classes_' + str(i)]) ]
               + ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
               + " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )

        # Draw bounding boxes around valid detections 
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

        img_draw = visualize_output.draw_bounding_box( 
                       y1, x1, y2, x2, 
                       img_draw,
                       thickness=4,
                       color=(255, 255, 0),
                       display_str=display_str )

    print( "==============================================================\n" )

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        skimage.io.imshow( img_draw )
        skimage.io.show()
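Every example here reads the deserialized results through per-detection keys. The dictionary returned by deserialize_output.ssd has the shape sketched below; the concrete numbers are illustrative only, while the key names and box layout follow the usage in the examples.

# Illustrative shape of the deserialized output dictionary
output_dict = {
    'num_detections': 2,
    'detection_classes_0': 15,                      # label index
    'detection_scores_0': 87.3,                     # confidence in percent
    'detection_boxes_0': [(48, 120), (310, 420)],   # (y1, x1), (y2, x2)
    'detection_classes_1': 5,
    'detection_scores_1': 62.0,
    'detection_boxes_1': [(10, 30), (200, 180)],
}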
Example #6
def ssd_infer_image(tensor, image_name):
    original_image = skimage.io.imread(image_name)
    output, inference_time = mv.manage_NCS(graph_file, tensor)
    output_dict = deserialize_output.ssd(output, CONFIDENCE_THRESHOLD,
                                         original_image.shape)

    # Compile results
    results = "SSD Object Detection results:\n\n This image contains:\n"

    for i in range(0, output_dict['num_detections']):
        results = results + str(output_dict['detection_scores_' + str(i)]) + "% confidence it could be a " + \
                  labels[int(output_dict['detection_classes_'   + str(i)])][3:] + "\n"

        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = (labels[output_dict.get('detection_classes_' + str(i))] +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        original_image = visualize_output.draw_bounding_box(
            y1,
            x1,
            y2,
            x2,
            original_image,
            thickness=6,
            color=(255, 255, 0),
            display_str=display_str)
    results = results + "\n\nExecution time: " + str(
        int(numpy.sum(inference_time))) + " ms"

    return original_image, results
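Example #6 delegates all device handling to a project-specific helper, mv.manage_NCS. A plausible reconstruction assuming the NCSDK v1 API (the real helper may differ):

from mvnc import mvncapi as mvnc

def manage_NCS(graph_file, tensor):
    # Open a device, run one inference on the fp16 tensor, clean up,
    # and return the raw output together with the per-layer timings
    devices = mvnc.EnumerateDevices()
    device = mvnc.Device(devices[0])
    device.OpenDevice()
    with open(graph_file, mode='rb') as f:
        graph = device.AllocateGraph(f.read())
    graph.LoadTensor(tensor, 'user object')
    output, userobj = graph.GetResult()
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)
    graph.DeallocateGraph()
    device.CloseDevice()
    return output, inference_time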
Example #7
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    global lastcount
    count = 0
    selected = 'bottle'

    for i in range(0, output_dict['num_detections']):
        print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
              labels[int(output_dict['detection_classes_' + str(i)])] +
              ": Top Left: " +
              str(output_dict['detection_boxes_' + str(i)][0]) +
              " Bottom Right: " +
              str(output_dict['detection_boxes_' + str(i)][1]))
        if str(selected) in labels[int(output_dict['detection_classes_' +
                                                   str(i)])]:
            count = count + 1
            # Draw bounding boxes around valid detections
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)
    if lastcount != count and selected != '':
        datastring = 'Item=' + selected + "&Count=" + str(count)
        file = open('./bottles.txt', 'w')
        file.write(datastring)
        file.close()
        lastcount = count
    print(count)
    print('\n')

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
Example #8
def infer_image( graph, img, frame ):

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd( 
                      output, 
                      CONFIDANCE_THRESHOLD, 
                      frame.shape )

    # Print the results (each image/frame may have multiple objects)
    #print( "I found these objects in "
            #+ " ( %.2f ms ):" % ( numpy.sum( inference_time ) ) )
    global lastcount
    count = 0
    for i in range( 0, output_dict['num_detections'] ):
        print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)] 
               + labels[ int(output_dict['detection_classes_' + str(i)]) ]
               + ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
               + " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )
        if 'person' in labels[ int(output_dict['detection_classes_' + str(i)]) ]:
            count = count + 1
            # Draw bounding boxes around valid detections 
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Prep string to overlay on the image
            display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

            frame = visualize_output.draw_bounding_box( 
                       y1, x1, y2, x2, 
                       frame,
                       thickness=4,
                       color=(255, 255, 0),
                       display_str=display_str )
    if lastcount != count:
        file = open('../node_server/client/people.txt','w')
        file.write(str(count)) 
        file.close() 

    lastcount = count
    print( str(count) + ' people\n' )

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow( 'NCS live inference', frame )
Example #9
def infer_image(graph, img, frame):
    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    output_str = "\n==============================================================\n"
    output_str += "Execution time: " + "%.1fms" % (
        numpy.sum(inference_time)) + "\n"
    for i in range(0, output_dict['num_detections']):
        output_str += "%3.1f%%\t" % output_dict['detection_scores_' + str(i)]
        output_str += labels[int(output_dict['detection_classes_' + str(i)])]
        output_str += ": Top Left: " + str(
            output_dict['detection_boxes_' + str(i)][0])
        output_str += " Bottom Right: " + str(
            output_dict['detection_boxes_' + str(i)][1])
        output_str += "\n"

        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        label = labels[output_dict.get('detection_classes_' + str(i))]
        labelsArray = label.split(":")
        if len(labelsArray) == 2:
            label = labelsArray[1]

        # Prep string to overlay on the image
        display_str = (label + ": " +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        frame = visualize_output.draw_bounding_box(y1,
                                                   x1,
                                                   y2,
                                                   x2,
                                                   frame,
                                                   thickness=2,
                                                   fontsize=20,
                                                   outlineColor=(0, 255, 0),
                                                   textColor=(0, 255, 0),
                                                   display_str=display_str)

    output_str += "==============================================================\n"

    print(output_str)

    return frame
Example #10
def infer_image( graph, img, frame ):

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd( 
                      output, 
                      CONFIDANCE_THRESHOLD, 
                      frame.shape )

    # Print the results (each image/frame may have multiple objects)
    for i in range( 0, output_dict['num_detections'] ):

        # Filter a specific class/category
        if( output_dict.get( 'detection_classes_' + str(i) ) == CLASS_PERSON ):

            cur_time = strftime( "%Y_%m_%d_%H_%M_%S", localtime() )
            print( "Person detected on " + cur_time )

            # Extract top-left & bottom-right coordinates of detected objects 
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Prep string to overlay on the image
            display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box( 
                        y1, x1, y2, x2, 
                        frame,
                        thickness=4,
                        color=(255, 255, 0),
                        display_str=display_str )

            # Capture snapshots
            img = Image.fromarray( frame )
            photo = ( os.path.dirname(os.path.realpath(__file__))
                      + "/captures/photo_"
                      + cur_time + ".jpg" )
            img.save( photo )

    # If a display is available, show the image on which inference was
    # performed. Convert the annotated frame explicitly, since img is
    # still the raw input tensor when no person was detected.
    if 'DISPLAY' in os.environ:
        Image.fromarray( frame ).show()
Example #11
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    #print("the shape of the frame is "+str(frame.shape))
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    #print( "I found these objects in "
    #       + " ( %.2f ms ):" % ( numpy.sum( inference_time ) ) )

    for i in range(0, output_dict['num_detections']):
        if (i > 0):
            f.write(" and ")
        f.write("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
                labels[int(output_dict['detection_classes_' + str(i)])] +
                ": Top Left: " +
                str(output_dict['detection_boxes_' + str(i)][0]) +
                " Bottom Right: " +
                str(output_dict['detection_boxes_' + str(i)][1]))
        #f.write('\n')

        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = (labels[output_dict.get('detection_classes_' + str(i))] +
                       ": " +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        frame = visualize_output.draw_bounding_box(y1,
                                                   x1,
                                                   y2,
                                                   x2,
                                                   frame,
                                                   thickness=4,
                                                   color=(255, 255, 0),
                                                   display_str=display_str)
    f.write('\n')
    return frame
Example #12
def infer_image( graph, img, frame ):

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd( 
                      output, 
                      CONFIDANCE_THRESHOLD, 
                      frame.shape )

    # Print the results (each image/frame may have multiple objects)
    print( "I found these objects in "
            + " ( %.2f ms ):" % ( numpy.sum( inference_time ) ) )

    for i in range( 0, output_dict['num_detections'] ):
        print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)] 
               + labels[ int(output_dict['detection_classes_' + str(i)]) ]
               + ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
               + " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )

        # Draw bounding boxes around valid detections 
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

        frame = visualize_output.draw_bounding_box( 
                       y1, x1, y2, x2, 
                       frame,
                       thickness=4,
                       color=(255, 255, 0),
                       display_str=display_str )
    print( '\n' )

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow( 'NCS live inference', frame )
Example #13
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if (output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON):

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
Example #14
def infer_image(graph, img, frame):
    # Get the results from NCS
    output, inference_time = mv.infer_image(graph, img)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDENCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):
        # Filter a specific class/category
        if (output_dict.get('detection_classes_' + str(i)) == BANNED_CLASS):
            print("Illegal object found!")
        cur_time = strftime("%Y_%m_%d_%H_%M_%S", localtime())

        # Extract top-left & bottom-right coordinates of detected objects
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = (labels[output_dict.get('detection_classes_' + str(i))] +
                       ": " +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        # Overlay bounding boxes, detection class and scores
        frame = visualize_output.draw_bounding_box(y1,
                                                   x1,
                                                   y2,
                                                   x2,
                                                   frame,
                                                   thickness=4,
                                                   color=(255, 255, 0),
                                                   display_str=display_str)

        # Capture snapshots
        photo = (os.path.dirname(os.path.realpath(__file__)) +
                 "/captures/photo_" + cur_time + ".jpg")
        cv2.imwrite(photo, frame)

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
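Here mv.infer_image wraps the LoadTensor/GetResult sequence that the other examples inline. A sketch of the wrapper under that assumption:

from mvnc import mvncapi as mvnc

def infer_image(graph, img):
    # Run one forward pass on an already-allocated graph and return
    # the raw output together with the per-layer execution times
    graph.LoadTensor(img, 'user object')
    output, userobj = graph.GetResult()
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)
    return output, inference_time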
Example #15
def show_image(num_detections, output_dict, movidius, frame, labels):

    for i in range(0, num_detections):

        box = output_dict['detection_boxes_%i' % i]
        score = output_dict['detection_scores_%i' % i]
        claz = output_dict['detection_classes_%i' % i]
        label = labels[claz]

        (y1, x1), (y2, x2) = box
        display_str = ('{}: {}%'.format(label, score))
        frame = visualize_output.draw_bounding_box(y1,
                                                   x1,
                                                   y2,
                                                   x2,
                                                   frame,
                                                   thickness=1,
                                                   color=(255, 255, 0),
                                                   display_str=display_str)

    cv2.imshow('Movidius', frame)
    if (cv2.waitKey(5) & 0xFF == ord('q')):
        dispose(movidius, graph)
Example #16
def show_inference(obj_list, frame, record_bool):
    for obj in obj_list:
        (y1, x1) = obj[0][0]
        (y2, x2) = obj[0][1]
        display_str = (str(obj[1])
                        + ": "
                        + str( obj[2] )
                        + "%" )

        frame = visualize_output.draw_bounding_box( 
                y1, x1, y2, x2, 
                frame,
                thickness=4,
                color=(255, 255, 0),
                display_str=display_str )
    if record_bool:
        cv2.line(frame, (line_x1, line_y1), (line_x2, line_y2), (0,255,0), 5)
        record_file.write(frame)

    # If a display is available, show the image on which inference was performed
    if debug == True and 'DISPLAY' in os.environ:
        cv2.line(frame, (line_x1, line_y1), (line_x2, line_y2), (0,255,0), 5)
        cv2.imshow( 'Live inference', frame )
Example #17
def infer_image(graph, img, frame):
    #from pySX127x_master.Tegwyns_LoRa_Beacon import LoRaBeacon
    #from pySX127x_master import Tegwyns_LoRa_Beacon
    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if (output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON):

            cur_time = strftime("%Y_%m_%d_%H_%M_%S", localtime())
            print("Person detected on " + cur_time)
            print(".... Press q to quit ..... ")

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
            #print (y1, x1)
            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")
            print(display_str)
            print(y1, x1)
            print(y2, x2)
            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)
            global myString
            myString = display_str + " , " + "(" + str(y1) + "," + str(
                x1) + ")" + "," + "(" + str(y2) + "," + str(x2) + ")"

            ###########################################################################################
            lora.start()
            ###########################################################################################

            # Capture snapshots
            photo = (os.path.dirname(os.path.realpath(__file__)) +
                     "/captures/photo_" + cur_time + ".jpg")
            cv2.imwrite(photo, frame)

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
Example #18
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if (output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON):

            # Time
            d = datetime.now()
            cur_time = strftime("%Y_%m_%d_%H_%M_%S", localtime())
            print("Person detected on " + cur_time)

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)

            # Capture snapshots
            photo = (os.path.dirname(os.path.realpath(__file__)) +
                     "/captures/photo_" + cur_time + ".jpg")

            cv2.imwrite(photo, frame)

            #IFTT notification
            global d_last
            d_diff = d - d_last

            if (d_diff.seconds > MINTIME):
                print('\033[31m' + 'Send Notification to IFTTT --- ' +
                      '\033[0m')  # Red text
                #print(d)
                #print(d_last)
                #print(d_diff)
                #print("    ")
                r = requests.post(
                    'https://maker.ifttt.com/trigger/rasp_seccam_triggered/with/key/c_6oKb50WdIWAaelvo3EINQ8ZU9ibwxNFJiBV1phPuh',
                    params={
                        "value1": cur_time,
                        "value2": photo,
                        "value3": "none"
                    })
                os.system("rclone copy " + photo + " gdrive:rclone")
                d_last = d

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
Example #19
def infer_image_fps(graph, img, frame, fps):
    global direction
    global counter
    global plotbuff
    a = []
    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Elapsed wall-clock time since start (starttime is a module global);
    # needed for the f.write() logging at the end of this function
    elapsedtime = time.time() - starttime
    # Print the results (each image/frame may have multiple objects)
    #    print( "I found these objects in ( %.2f ms ):" % ( numpy.sum( inference_time ) ) )
    inftime = numpy.sum(inference_time)
    numobj = (output_dict['num_detections'])

    # create array for detected obj
    a = [[] for _ in range(numobj)]

    #    print (numobj)
    cpu = psutil.cpu_percent()

    for i in range(0, output_dict['num_detections']):
        print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
              labels[int(output_dict['detection_classes_' + str(i)])] +
              ": Top Left: " +
              str(output_dict['detection_boxes_' + str(i)][0]) +
              " Bottom Right: " +
              str(output_dict['detection_boxes_' + str(i)][1]))
        #        print(str(i))
        a[i].append(output_dict['detection_scores_' + str(i)])
        a[i].append(labels[int(output_dict['detection_classes_' + str(i)])])
        a[i].append(str(output_dict['detection_boxes_' + str(i)][0]))
        a[i].append(str(output_dict['detection_boxes_' + str(i)][1]))
        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
        # spencer: moving direction...
        avgx = x2 - x1
        avgy = y2 - y1
        prevavg = (y1, x1)

        plotbuff.appendleft(prevavg)

        for j in np.arange(1, len(plotbuff)):
            if plotbuff[j - 1] is None or plotbuff[j] is None:
                continue
            if counter >= 4 and j == 1 and len(plotbuff) == PLOTBUFFSIZE:
                #                dX = plotbuff[4][0] - plotbuff[j][0]
                dY = plotbuff[j][0] - plotbuff[4][0]
                #                dY = plotbuff[4][1] - plotbuff[j][1]
                dX = plotbuff[j][1] - plotbuff[4][1]
                (dirY, dirX) = ("", "")
                #                print((dY,dX))

                if np.abs(dX) > 10:
                    dirX = "EAST" if np.sign(dX) == 1 else "WEST"

                if np.abs(dY) > 20:
                    dirY = "SOUTH" if np.sign(dY) == 1 else "NORTH"

                if dirX != "" and dirY != "":
                    direction = "{}-{}".format(dirY, dirX)

                else:
                    direction = dirX if dirX != "" else dirY

        print(direction)
        print(avgx)
        if direction == "EAST" or direction == "EAST-SOUTH" or direction == "EAST-NORTH":
            if avgx >= width - 200:
                print("{} MOVING EAST OUT OF THE SCENE", format(avgx))
        if direction == "WEST" or direction == "WEST-SOUTH" or direction == "WEST-NORTH":
            if avgx <= 200:
                print("{} MOVING WEST OUT OF THE SCENE", format(avgx))

        # Prep string to overlay on the image
        display_str = (labels[output_dict.get('detection_classes_' + str(i))] +
                       ": " +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        frame = visualize_output.draw_bounding_box(y1,
                                                   x1,
                                                   y2,
                                                   x2,
                                                   frame,
                                                   thickness=4,
                                                   color=(255, 255, 0),
                                                   display_str=display_str)
        cv2.putText(frame, 'FPS:' + str(fps), (10, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2,
                    cv2.LINE_AA)
        cv2.putText(frame, direction, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.65,
                    (0, 0, 255), 3)
    # print( '\n' )

    # If a display is available, show the image on which inference was performed
    counter += 1
    if displayit == "on":
        cv2.imshow('NCS live inference', frame)

    # Log elapsed time, CPU load, inference time, FPS, object count,
    # direction, and per-object details (for tracking one human)
    f.write(
        str("{0:.2f}".format(elapsedtime)) + '\t' + str(cpu) + '\t' +
        str("{0:.2f}".format(inftime)) + '\t' + str("{0:.2f}".format(fps)) +
        '\t' + str(numobj) + '\t' + str(direction) + '\t' + str(a) + '\n')
    #    print(a)
    # need plots...! for multiple objects
    del a
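infer_image_fps() leans on several module-level globals defined outside the snippet. A sketch of plausible definitions; the concrete values are assumptions, except that plotbuff must hold at least five points because the direction logic indexes plotbuff[4]:

import time
from collections import deque

PLOTBUFFSIZE = 5                        # must be >= 5 for plotbuff[4]
plotbuff = deque(maxlen=PLOTBUFFSIZE)   # recent (y, x) top-left corners
direction = ""                          # latest movement estimate
counter = 0                             # frames processed so far
width = 640                             # frame width in pixels (assumed)
displayit = "on"                        # toggles the live preview window
starttime = time.time()                 # reference point for elapsedtime
f = open('tracking.log', 'w')           # hypothetical log file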
Example #20
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if ((output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON) or
            (output_dict.get('detection_classes_' + str(i)) == CLASS_CAR) or
            (output_dict.get('detection_classes_' + str(i)) == CLASS_FACE)):

            cur_time = strftime("%Y_%m_%d_%H_%M_%S", localtime())
            print("Person detected on " + cur_time)

            # Print the results (each image/frame may have multiple objects)
            print("I found these objects in " + " ( %.2f ms ):" %
                  (numpy.sum(inference_time)))

            print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
                  labels[int(output_dict['detection_classes_' + str(i)])] +
                  ": Top Left: " +
                  str(output_dict['detection_boxes_' + str(i)][0]) +
                  " Bottom Right: " +
                  str(output_dict['detection_boxes_' + str(i)][1]))

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            #center x, y target point of object box
            cx = x1 + (x2 - x1) / 2
            cy = y1 + (y2 - y1) / 2
            print(cx, cy)
            # Print the target number to the terminal
            print('#' + str(i + 1))
            # Track the top-scoring targets (#1 or #2)
            if (i + 1) in (1, 2):
                #servo motor
                if cx < width * 3 / 7:
                    arduino.write(b'4')
                    print('4')
                if cx < width * 2 / 7:
                    arduino.write(b'4')
                    print('44')
                if cx < width * 1 / 7:
                    arduino.write(b'44')
                    print('444')
                if cx > width * 4 / 7:
                    arduino.write(b'6')
                    print('6')
                if cx > width * 5 / 7:
                    arduino.write(b'6')
                    print('66')
                if cx > width * 6 / 7:
                    arduino.write(b'66')
                    print('666')

                if cy < height * 3 / 7:
                    arduino.write(b'2')
                    print('2')
                if cy < height * 2 / 7:
                    arduino.write(b'2')
                    print('22')
                if cy < height * 1 / 7:
                    arduino.write(b'22')
                    print('222')

                if cy > height * 4 / 7:
                    arduino.write(b'8')
                    print('8')
                if cy > height * 5 / 7:
                    arduino.write(b'8')
                    print('88')
                if cy > height * 6 / 7:
                    arduino.write(b'88')
                    print('888')
            '''
            #servo motor                      
            if cx < width*3 / 7 :
                 arduino.write(b'4')
                 print ('4')
            if cx < width*2 / 7 :
                 arduino.write(b'4')
                 print ('44')
            if cx < width*1 / 7 :
                 arduino.write(b'44')
                 print ('444')
            if cx > width*4 / 7 :
                 arduino.write(b'6')
                 print ('6')
            if cx > width*5 / 7 :
                 arduino.write(b'6')
                 print ('66')
            if cx > width*6 / 7 :
                 arduino.write(b'66')
                 print ('666')

            if cy < height*3 / 7 :
                 arduino.write(b'2')
                 print ('2')
            if cy < height*2 / 7 :
                 arduino.write(b'2')
                 print ('22')
            if cy < height*1 / 7 :
                 arduino.write(b'22')
                 print ('222')

            if cy > height*4 / 7 :
                 arduino.write(b'8')
                 print ('8')
            if cy > height*5 / 7 :
                 arduino.write(b'8')
                 print ('88')
            if cy > height*6 / 7 :
                 arduino.write(b'88')
                 print ('888')
            '''
            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)
            # Show the target number beside its bounding box (targets are
            # numbered in descending detection-score order)
            cv2.putText(frame, '#' + str(i + 1), (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)
            # Fire or capture snapshots once the target is centered
            if cx <= (width * 4 / 7) and cx >= (width * 3 / 7) and cy <= (
                    height * 4 / 7) and cy >= (height * 3 / 7):
                text = 'Locking'
                cv2.putText(frame, text, (300, 40), cv2.FONT_HERSHEY_SIMPLEX,
                            1, (0, 0, 255), 2, cv2.LINE_AA)
                # show targets number
                cv2.putText(frame, 'Targets:' + str(i + 1), (10, 100),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                            cv2.LINE_AA)
                # Fire at targets close to the unit (high threat) and at target #1
                if ((x2 - x1) *
                    (y2 - y1)) >= (width * height) / 3 and (i + 1) == 1:
                    arduino.write(b'5')
                    print('fire')
                    text = 'FIRE'
                    cv2.putText(frame, text, (500, 40),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2,
                                cv2.LINE_AA)

                    # Cross Line in snapshots

                    green = (0, 255, 0)
                    red = (0, 0, 255)
                    cv2.line(frame, (cross_line_w, cross_line_h - 80),
                             (cross_line_w, cross_line_h + 80), green,
                             2)  #line y
                    cv2.line(frame, (cross_line_w - 20, cross_line_h - 5),
                             (cross_line_w - 20, cross_line_h + 5), green,
                             2)  #,left
                    cv2.line(frame, (cross_line_w + 20, cross_line_h - 5),
                             (cross_line_w + 20, cross_line_h + 5), green,
                             2)  #,right
                    cv2.line(frame, (cross_line_w - 40, cross_line_h - 5),
                             (cross_line_w - 40, cross_line_h + 5), green,
                             2)  #,,left
                    cv2.line(frame, (cross_line_w + 40, cross_line_h - 5),
                             (cross_line_w + 40, cross_line_h + 5), green,
                             2)  #,,right
                    cv2.line(frame, (cross_line_w - 60, cross_line_h - 5),
                             (cross_line_w - 60, cross_line_h + 5), green,
                             2)  #,,,left
                    cv2.line(frame, (cross_line_w + 60, cross_line_h - 5),
                             (cross_line_w + 60, cross_line_h + 5), green,
                             2)  #,,,right
                    cv2.line(frame, (cross_line_w - 80, cross_line_h),
                             (cross_line_w + 80, cross_line_h), green,
                             2)  #line x
                    cv2.line(frame, (cross_line_w - 5, cross_line_h - 20),
                             (cross_line_w + 5, cross_line_h - 20), green,
                             2)  #,up
                    cv2.line(frame, (cross_line_w - 5, cross_line_h + 20),
                             (cross_line_w + 5, cross_line_h + 20), green,
                             2)  #,down
                    cv2.line(frame, (cross_line_w - 5, cross_line_h - 40),
                             (cross_line_w + 5, cross_line_h - 40), green,
                             2)  #,,up
                    cv2.line(frame, (cross_line_w - 5, cross_line_h + 40),
                             (cross_line_w + 5, cross_line_h + 40), green,
                             2)  #,,down
                    cv2.line(frame, (cross_line_w - 5, cross_line_h - 60),
                             (cross_line_w + 5, cross_line_h - 60), green,
                             2)  #,,,up
                    cv2.line(frame, (cross_line_w - 5, cross_line_h + 60),
                             (cross_line_w + 5, cross_line_h + 60), green,
                             2)  #,,,down

                    # Capture snapshots
                    photo = (os.path.dirname(os.path.realpath(__file__)) +
                             "/captures/photo_" + cur_time + ".jpg")
                    cv2.imwrite(photo, frame)

            else:
                text = 'Searching....'
                cv2.putText(frame, text, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (0, 255, 0), 2, cv2.LINE_AA)
                # show targets number
                cv2.putText(frame, 'Targets:' + str(i + 1), (10, 100),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                            cv2.LINE_AA)
            '''
            # Capture snapshots
            photo = ( os.path.dirname(os.path.realpath(__file__))
                      + "/captures/photo_"
                      + cur_time + ".jpg" )
            cv2.imwrite( photo, frame )
            '''

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:

        # Crosshair overlay: 80 px axes with tick marks every 20 px
        green = (0, 255, 0)
        red = (0, 0, 255)
        cv2.line(frame, (cross_line_w, cross_line_h - 80),
                 (cross_line_w, cross_line_h + 80), green, 2)  # vertical axis
        cv2.line(frame, (cross_line_w - 20, cross_line_h - 5),
                 (cross_line_w - 20, cross_line_h + 5), green, 2)  # tick 20 px left
        cv2.line(frame, (cross_line_w + 20, cross_line_h - 5),
                 (cross_line_w + 20, cross_line_h + 5), green, 2)  # tick 20 px right
        cv2.line(frame, (cross_line_w - 40, cross_line_h - 5),
                 (cross_line_w - 40, cross_line_h + 5), green, 2)  # tick 40 px left
        cv2.line(frame, (cross_line_w + 40, cross_line_h - 5),
                 (cross_line_w + 40, cross_line_h + 5), green, 2)  # tick 40 px right
        cv2.line(frame, (cross_line_w - 60, cross_line_h - 5),
                 (cross_line_w - 60, cross_line_h + 5), green, 2)  # tick 60 px left
        cv2.line(frame, (cross_line_w + 60, cross_line_h - 5),
                 (cross_line_w + 60, cross_line_h + 5), green, 2)  # tick 60 px right
        cv2.line(frame, (cross_line_w - 80, cross_line_h),
                 (cross_line_w + 80, cross_line_h), green, 2)  # horizontal axis
        cv2.line(frame, (cross_line_w - 5, cross_line_h - 20),
                 (cross_line_w + 5, cross_line_h - 20), green, 2)  # tick 20 px up
        cv2.line(frame, (cross_line_w - 5, cross_line_h + 20),
                 (cross_line_w + 5, cross_line_h + 20), green, 2)  # tick 20 px down
        cv2.line(frame, (cross_line_w - 5, cross_line_h - 40),
                 (cross_line_w + 5, cross_line_h - 40), green, 2)  # tick 40 px up
        cv2.line(frame, (cross_line_w - 5, cross_line_h + 40),
                 (cross_line_w + 5, cross_line_h + 40), green, 2)  # tick 40 px down
        cv2.line(frame, (cross_line_w - 5, cross_line_h - 60),
                 (cross_line_w + 5, cross_line_h - 60), green, 2)  # tick 60 px up
        cv2.line(frame, (cross_line_w - 5, cross_line_h + 60),
                 (cross_line_w + 5, cross_line_h + 60), green, 2)  # tick 60 px down

        cv2.imshow('NCS live inference', frame)
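
The crosshair overlay above is repeated verbatim across several of these examples. A minimal sketch of a helper that would factor it out; draw_crosshair is a hypothetical name, and the geometry (80 px axes, ticks every 20 px out to 60 px) matches the inlined calls:

import cv2

def draw_crosshair(frame, cx, cy, color=(0, 255, 0), thickness=2):
    # Vertical and horizontal axes, 80 px each way from the center
    cv2.line(frame, (cx, cy - 80), (cx, cy + 80), color, thickness)
    cv2.line(frame, (cx - 80, cy), (cx + 80, cy), color, thickness)
    # Tick marks every 20 px, out to 60 px, on both sides of both axes
    for d in (20, 40, 60):
        for s in (-1, 1):
            cv2.line(frame, (cx + s * d, cy - 5), (cx + s * d, cy + 5),
                     color, thickness)
            cv2.line(frame, (cx - 5, cy + s * d), (cx + 5, cy + s * d),
                     color, thickness)

A single draw_crosshair(frame, cross_line_w, cross_line_h) call then replaces the thirteen inlined cv2.line calls.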
Example #21
0
def main():
    devices = mvnc.enumerate_devices()
    if len(devices) < 1:
        print('No NCS device detected.')
        print('Insert device and try again!')
        return 1

    obj_detect_list = list()
    obj_age_list = list()
    obj_gender_list = list()

    device_number = 0

    net = cv2.dnn.readNetFromCaffe("./deploy.prototxt.txt",
                                   "./face_det.caffemodel")
    for one_device in devices:
        try:
            obj_detect_dev = mvnc.Device(one_device)
            obj_detect_dev.open()
            print("opened device " + str(device_number))
            obj_detector_proc = SsdMobileNetProcessor(
                "./graphs/ssd_mobilenet",
                obj_detect_dev,
                inital_box_prob_thresh=60 / 100.0,
                classification_mask=object_classifications_mask,
                name="object detector " + str(device_number))
            obj_detect_list.append(obj_detector_proc)

            obj_age_proc = AgeNetProcessor("./graphs/age_net",
                                           obj_detect_dev,
                                           name="age detector " +
                                           str(device_number))

            obj_age_list.append(obj_age_proc)

            obj_gender_proc = GenderNetProcessor("./graphs/gender_net",
                                                 obj_detect_dev,
                                                 name="gender detector " +
                                                 str(device_number))

            obj_gender_list.append(obj_gender_proc)

            device_number += 1

        except Exception:
            print("Could not open device " + str(device_number) +
                  ", trying next device")

    if len(obj_detect_list) < 1 or len(obj_gender_list) < 1:
        print('Could not open any NCS devices.')
        print('Reinsert devices and try again!')
        return 1

    print("Using " + str(len(obj_detect_list)) +
          " devices for object detection")

    camera = cv2.VideoCapture(0)
    cc = clean_close.clean_close()
    while True:
        if cc.close:
            for one_obj_detect_proc in obj_detect_list:
                one_obj_detect_proc.cleanup(True)
            for one_obj_age_proc in obj_age_list:
                one_obj_age_proc.cleanup(True)
            for one_obj_gender_proc in obj_gender_list:
                one_obj_gender_proc.cleanup(True)
            camera.release()
            break

        for one_obj_detect_proc, one_obj_age_proc, one_obj_gender_proc in zip(
                obj_detect_list, obj_age_list, obj_gender_list):
            ret, frame = camera.read()
            if not ret:
                continue
            send_frame = frame
            one_obj_detect_proc.start_aysnc_inference(frame)
            (detections,
             display_image) = one_obj_detect_proc.get_async_inference_result()
            if len(detections) > 0:
                for obj in detections:
                    payload = {}
                    payload['class'] = str(obj[0])
                    payload['box'] = str(obj[1:5])
                    payload['score'] = str(obj[5])
                    # draw_bounding_box expects (y1, x1, y2, x2); the original
                    # passed obj[3] twice, dropping the fourth box coordinate
                    send_frame = visualize_output.draw_bounding_box(
                        obj[1],
                        obj[2],
                        obj[3],
                        obj[4],
                        send_frame,
                        display_str=str(obj[0]))
                    if payload['class'] == "person":
                        person = frame
                        (h, w) = person.shape[:2]
                        person = imutils.resize(person, width=400)
                        blob = cv2.dnn.blobFromImage(
                            cv2.resize(person, (300, 300)), 1.0, (300, 300),
                            (104.0, 177.0, 123.0))
                        net.setInput(blob)
                        # Use a separate name so the outer 'detections' list
                        # being iterated is not shadowed
                        face_detections = net.forward()
                        for i in range(0, face_detections.shape[2]):
                            confidence = face_detections[0, 0, i, 2]
                            if confidence < 80 / 100:
                                continue
                            box_face = (
                                (face_detections[0, 0, i, 3:7] *
                                 numpy.array([w, h, w, h])).astype("int") +
                                [-50, -50, +50, +50])
                            one_obj_age_proc.start_aysnc_inference(
                                person, box_face)
                            one_obj_gender_proc.start_aysnc_inference(
                                person, box_face)
                            (age_prediction, age_detection
                             ) = one_obj_age_proc.get_async_inference_result()
                            (
                                gender_prediction, gender_detection
                            ) = one_obj_gender_proc.get_async_inference_result(
                            )
                            send_frame = visualize_output.draw_bounding_box(
                                box_face[1],
                                box_face[0],
                                box_face[3],
                                box_face[2],
                                send_frame,
                                display_str=(str(gender_detection) + " - " +
                                             str(age_detection)))
                            gender = {}
                            age = {}
                            age['score'] = age_prediction
                            age['detection'] = age_detection
                            gender['score'] = gender_prediction
                            gender['detection'] = gender_detection
                            payload['face_box'] = str(
                                (face_detections[0, 0, i, 3:7] *
                                 numpy.array([w, h, w, h])).astype("int"))
                            payload['age'] = json.dumps(age)
                            payload['gender'] = json.dumps(gender)
                    print(payload)
                    publish.single("data/" + payload['class'],
                                   payload=json.dumps(payload),
                                   hostname="localhost")
Example #22
0
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if ((output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON) or
            (output_dict.get('detection_classes_' + str(i)) == CLASS_CAR)):

            cur_time = strftime("%Y_%m_%d_%H_%M_%S", localtime())
            print("Person/car detected at " + cur_time)

            # Print the results (each image/frame may have multiple objects)
            print("I found these objects in " + " ( %.2f ms ):" %
                  (numpy.sum(inference_time)))

            print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
                  labels[int(output_dict['detection_classes_' + str(i)])] +
                  ": Top Left: " +
                  str(output_dict['detection_boxes_' + str(i)][0]) +
                  " Bottom Right: " +
                  str(output_dict['detection_boxes_' + str(i)][1]))

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Center (x, y) of the target box
            cx = x1 + (x2 - x1) / 2
            cy = y1 + (y2 - y1) / 2
            print(cx, cy)

            # Lock on when the target center sits in the middle 3/7-4/7
            # window of the frame
            if (width * 3 / 7 <= cx <= width * 4 / 7
                    and height * 3 / 7 <= cy <= height * 4 / 7):
                text = 'Locking'
                cv2.putText(frame, text, (300, 40), cv2.FONT_HERSHEY_SIMPLEX,
                            1, (0, 0, 255), 2, cv2.LINE_AA)
                # fire
                if ((x2 - x1) * (y2 - y1)) >= (width * height) / 4:
                    arduino.write(b'5')
                    print('fire')
                    #time.sleep(0.5)
            else:
                text = 'Searching....'
                cv2.putText(frame, text, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (0, 255, 0), 2, cv2.LINE_AA)

            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)
            '''
            # Capture snapshots
            photo = ( os.path.dirname(os.path.realpath(__file__))
                      + "/captures/photo_"
                      + cur_time + ".jpg" )
            cv2.imwrite( photo, frame )
            '''
    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:

        # Crosshair overlay: 80 px axes with tick marks every 20 px
        green = (0, 255, 0)
        red = (0, 0, 255)

        cv2.line(frame, (cross_line_w, cross_line_h - 80),
                 (cross_line_w, cross_line_h + 80), green, 2)  # vertical axis
        cv2.line(frame, (cross_line_w - 20, cross_line_h - 5),
                 (cross_line_w - 20, cross_line_h + 5), green, 2)  # tick 20 px left
        cv2.line(frame, (cross_line_w + 20, cross_line_h - 5),
                 (cross_line_w + 20, cross_line_h + 5), green, 2)  # tick 20 px right
        cv2.line(frame, (cross_line_w - 40, cross_line_h - 5),
                 (cross_line_w - 40, cross_line_h + 5), green, 2)  # tick 40 px left
        cv2.line(frame, (cross_line_w + 40, cross_line_h - 5),
                 (cross_line_w + 40, cross_line_h + 5), green, 2)  # tick 40 px right
        cv2.line(frame, (cross_line_w - 60, cross_line_h - 5),
                 (cross_line_w - 60, cross_line_h + 5), green, 2)  # tick 60 px left
        cv2.line(frame, (cross_line_w + 60, cross_line_h - 5),
                 (cross_line_w + 60, cross_line_h + 5), green, 2)  # tick 60 px right

        cv2.line(frame, (cross_line_w - 80, cross_line_h),
                 (cross_line_w + 80, cross_line_h), green, 2)  # horizontal axis
        cv2.line(frame, (cross_line_w - 5, cross_line_h - 20),
                 (cross_line_w + 5, cross_line_h - 20), green, 2)  # tick 20 px up
        cv2.line(frame, (cross_line_w - 5, cross_line_h + 20),
                 (cross_line_w + 5, cross_line_h + 20), green, 2)  # tick 20 px down
        cv2.line(frame, (cross_line_w - 5, cross_line_h - 40),
                 (cross_line_w + 5, cross_line_h - 40), green, 2)  # tick 40 px up
        cv2.line(frame, (cross_line_w - 5, cross_line_h + 40),
                 (cross_line_w + 5, cross_line_h + 40), green, 2)  # tick 40 px down
        cv2.line(frame, (cross_line_w - 5, cross_line_h - 60),
                 (cross_line_w + 5, cross_line_h - 60), green, 2)  # tick 60 px up
        cv2.line(frame, (cross_line_w - 5, cross_line_h + 60),
                 (cross_line_w + 5, cross_line_h + 60), green, 2)  # tick 60 px down

        cv2.imshow('NCS live inference', frame)
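
The lock-and-fire logic in this example reduces to two geometric tests: the box center must sit in a fixed central window, and the box must cover at least a quarter of the frame. A sketch of the same tests as standalone predicates (hypothetical helper names; width and height are the globals the example already assumes):

def in_lock_window(cx, cy, width, height):
    # Center must fall in the middle 3/7-4/7 band on both axes
    return (width * 3 / 7 <= cx <= width * 4 / 7 and
            height * 3 / 7 <= cy <= height * 4 / 7)

def close_enough_to_fire(x1, y1, x2, y2, width, height):
    # Box area must be at least a quarter of the frame area
    return (x2 - x1) * (y2 - y1) >= width * height / 4

With these, the branch above becomes: lock when in_lock_window(cx, cy, width, height) holds, and send b'5' only when close_enough_to_fire(x1, y1, x2, y2, width, height) also holds.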
Example #23
0
def infer_image( graph, img, frame, motor ):

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd( 
                      output, 
                      CONFIDANCE_THRESHOLD, 
                      frame.shape )

    # Print the results (each image/frame may have multiple objects)
    print( "I found these objects in "
            + " ( %.2f ms ):" % ( numpy.sum( inference_time ) ) )

    count_person = 0
    for i in range( 0, output_dict['num_detections'] ):
        # Only interested in the first detected person.
        if labels[output_dict['detection_classes_' + str(i)]] != "15: person":
            continue
        if count_person > 0:
            continue
        count_person += 1
        print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)] 
               + labels[ int(output_dict['detection_classes_' + str(i)]) ]
               + ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
               + " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )

        # Draw bounding boxes around valid detections 
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
        print("x1 x2: %d %d\n" % (x1, x2))
        mid = (x1+x2)/2  # horizontal center of the person box
        dis = 320 - mid  # offset from the middle of a 640 px frame
        print("distance: %d\n" % dis)
        if dis > 20:
            motor.direction = "forward"
        elif dis < -20:
            motor.direction = "backward"
        else:
            motor.direction = "stop"

        # Prep string to overlay on the image
        display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

        frame = visualize_output.draw_bounding_box( 
                       y1, x1, y2, x2, 
                       frame,
                       thickness=4,
                       color=(255, 255, 0),
                       display_str=display_str )
    print( '\n' )

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow( 'NCS live inference', frame )
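
This example only ever sets motor.direction; the motor object itself is constructed elsewhere in the script. A hypothetical stub showing the interface the function assumes (a real implementation would drive GPIO or a serial link in the setter):

class Motor:
    """Hypothetical stand-in for the motor controller infer_image expects."""

    def __init__(self):
        self._direction = "stop"

    @property
    def direction(self):
        return self._direction

    @direction.setter
    def direction(self, value):
        if value not in ("forward", "backward", "stop"):
            raise ValueError("unknown direction: " + value)
        # A real controller would command the hardware here
        self._direction = value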
Example #24
0
def infer_image_fps(graph, img, frame, fps):
    #    global direction
    global counter
    #    global plotbuff
    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # 'elapsedtime' is referenced in the Redis record below, so this line
    # must stay active; it assumes a module-level 'starttime' set earlier.
    elapsedtime = time.time() - starttime
    # Print the results (each image/frame may have multiple objects)
    #    print( "I found these objects in ( %.2f ms ):" % ( numpy.sum( inference_time ) ) )
    inftime = numpy.sum(inference_time)
    numobj = output_dict['num_detections']

    # create array for detected obj
    a = [[] for _ in range(numobj)]

    #    print (numobj)
    cpu = psutil.cpu_percent()

    for i in range(0, output_dict['num_detections']):
        print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
              labels[int(output_dict['detection_classes_' + str(i)])] +
              ": Top Left: " +
              str(output_dict['detection_boxes_' + str(i)][0]) +
              " Bottom Right: " +
              str(output_dict['detection_boxes_' + str(i)][1]))
        #        print(str(i))
        a[i].append(output_dict['detection_scores_' + str(i)])
        a[i].append(labels[int(output_dict['detection_classes_' + str(i)])])
        a[i].append(str(output_dict['detection_boxes_' + str(i)][0]))
        a[i].append(str(output_dict['detection_boxes_' + str(i)][1]))
        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image

        display_str = (labels[output_dict.get('detection_classes_' + str(i))] +
                       ": " +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        frame = visualize_output.draw_bounding_box(y1,
                                                   x1,
                                                   y2,
                                                   x2,
                                                   frame,
                                                   thickness=4,
                                                   color=(255, 255, 0),
                                                   display_str=display_str)
        cv2.putText(frame, 'FPS:' + str(fps), (10, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2,
                    cv2.LINE_AA)
        #    cv2.putText(frame, direction, (10,30), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0,0,255),3)
    #    print( '\n' )

    # If a display is available, show the image on which inference was performed
    counter += 1
    if displayit == "on":
        cv2.imshow('NCS live inference', frame)

    # Need to log here; the commented print tracks a single human:
    #    print (elapsedtime)
    #    print(str("{0:.2f}".format(elapsedtime)) + '\t' + str(cpu) + '\t' + str("{0:.2f}".format(inftime)) + '\t' + str("{0:.2f}".format(fps)) +'\t' + str(numobj)+'\t'+str(a)+'\n')

    # Save the per-frame record to Redis, keyed by the frame counter.
    save = {
        "elapsedtime": "{0:.2f}".format(elapsedtime),
        "CPU": str(cpu),
        "inftime": str("{0:.2f}".format(inftime)),
        "fps": str("{0:.2f}".format(fps)),
        "numberofobjects": str(numobj),
        "a": str(a)
    }

    # hmset is deprecated in redis-py 3.5+; hset(counter, mapping=save)
    # is the modern equivalent
    r.hmset(counter, save)
    #    r.sadd("myset",save)
    print(r.hgetall(counter))
    #    print(save)
    # Need plots for multiple objects.
    del a
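
A minimal sketch of reading those per-frame records back out, assuming the same local Redis instance and the plain integer counter keys the example writes:

import redis

# decode_responses=True returns str instead of bytes for keys and fields
r = redis.Redis(host="localhost", port=6379, decode_responses=True)

# Frame counters are bare integers; skip any unrelated keys
for frame_id in sorted((k for k in r.keys('*') if k.isdigit()), key=int):
    record = r.hgetall(frame_id)
    print(frame_id, record.get('fps'), record.get('numberofobjects'))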
Example #25
0
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if ((output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON) or
            (output_dict.get('detection_classes_' + str(i)) == CLASS_CAR)):

            cur_time = strftime("%Y_%m_%d_%H_%M_%S", localtime())
            print("Person/car detected at " + cur_time)

            # Print the results (each image/frame may have multiple objects)
            print("I found these objects in " + " ( %.2f ms ):" %
                  (numpy.sum(inference_time)))

            print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
                  labels[int(output_dict['detection_classes_' + str(i)])] +
                  ": Top Left: " +
                  str(output_dict['detection_boxes_' + str(i)][0]) +
                  " Bottom Right: " +
                  str(output_dict['detection_boxes_' + str(i)][1]))

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Center (x, y) of the object box
            cx = x1 + (x2 - x1) / 2
            cy = y1 + (y2 - y1) / 2
            print(cx, cy)
            # Print the target number to the terminal
            print('#' + str(i + 1))

            # Track only target #1 (the detection with the highest score)
            if (i + 1) == 1:
                # Servo motor: map the horizontal zone of the target center
                # to a one-byte command, 'a'..'e' for the left-of-center
                # zones (outermost first) and 'f'..'i' for the right; the
                # middle band width*5/10..width*6/10 is a dead zone.
                if cx < width / 10:
                    arduino.write(b'a')
                    print('a')
                elif width / 10 < cx < width * 2 / 10:
                    arduino.write(b'b')
                    print('b')
                elif width * 2 / 10 < cx < width * 3 / 10:
                    arduino.write(b'c')
                    print('c')
                elif width * 3 / 10 < cx < width * 4 / 10:
                    arduino.write(b'd')
                    print('d')
                elif width * 4 / 10 < cx < width * 5 / 10:
                    arduino.write(b'e')
                    print('e')
                elif width * 6 / 10 < cx < width * 7 / 10:
                    arduino.write(b'f')
                    print('f')
                elif width * 7 / 10 < cx < width * 8 / 10:
                    arduino.write(b'g')
                    print('g')
                elif width * 8 / 10 < cx < width * 9 / 10:
                    arduino.write(b'h')
                    print('h')
                elif cx > width * 9 / 10:
                    arduino.write(b'i')
                    print('i')

                # Same mapping vertically: 'j'..'n' above center, 'o'..'r'
                # below, with a dead zone between height*5/10 and height*6/10.
                if cy < height / 10:
                    arduino.write(b'j')
                    print('j')
                elif height / 10 < cy < height * 2 / 10:
                    arduino.write(b'k')
                    print('k')
                elif height * 2 / 10 < cy < height * 3 / 10:
                    arduino.write(b'l')
                    print('l')
                elif height * 3 / 10 < cy < height * 4 / 10:
                    arduino.write(b'm')
                    print('m')
                elif height * 4 / 10 < cy < height * 5 / 10:
                    arduino.write(b'n')
                    print('n')
                elif height * 6 / 10 < cy < height * 7 / 10:
                    arduino.write(b'o')
                    print('o')
                elif height * 7 / 10 < cy < height * 8 / 10:
                    arduino.write(b'p')
                    print('p')
                elif height * 8 / 10 < cy < height * 9 / 10:
                    arduino.write(b'q')
                    print('q')
                elif cy > height * 9 / 10:
                    arduino.write(b'r')
                    print('r')

            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)
            # Show the target number next to its bounding box (numbers are
            # ordered by detection score, highest first)
            cv2.putText(frame, '#' + str(i + 1), (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)
            cv2.putText(frame, 'Targets:' + str(i + 1), (10, 100),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)
            # fire
            if ((x2 - x1) *
                (y2 - y1)) >= (width * height) / 4 and (i + 1) == 1:
                arduino.write(b'5')
                print('fire')
                text = 'FIRE'
                cv2.putText(frame, text, (500, 40), cv2.FONT_HERSHEY_SIMPLEX,
                            1, (0, 0, 255), 2, cv2.LINE_AA)

                # Capture snapshots
                photo = (os.path.dirname(os.path.realpath(__file__)) +
                         "/captures/photo_" + cur_time + ".jpg")
                cv2.imwrite(photo, frame)
    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
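
The zone-to-command chains above can also be written as a lookup: zone index int(pos * 10 / size) selects the byte directly. A hypothetical refactor sketch that reuses the example's cx, cy, width, height and arduino names; note that pixels landing exactly on a zone boundary, which the original strict inequalities silently skip, here fall into one of the neighboring zones:

# Index 5 is the central dead zone that sends nothing
X_CMDS = [b'a', b'b', b'c', b'd', b'e', None, b'f', b'g', b'h', b'i']
Y_CMDS = [b'j', b'k', b'l', b'm', b'n', None, b'o', b'p', b'q', b'r']

def zone_command(table, pos, size):
    zone = min(int(pos * 10 / size), 9)  # clamp the far edge into zone 9
    return table[zone]

for cmd in (zone_command(X_CMDS, cx, width), zone_command(Y_CMDS, cy, height)):
    if cmd is not None:
        arduino.write(cmd)
        print(cmd.decode())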