Example #1
def infer_image_v2( graph, img_draw, img):

    # The first inference takes an additional ~20ms due to memory 
    # initializations, so we make a 'dummy forward pass'.
    graph.LoadTensor( img, 'user object' )
    output, userobj = graph.GetResult()

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    if ARGS['network'] == 'SSD':
        output_dict = deserialize_output.ssd( output, CONFIDANCE_THRESHOLD, img_draw.shape )
    elif ARGS['network'] == 'TinyYolo':
        output_dict = deserialize_output.tinyyolo( output, CONFIDANCE_THRESHOLD, img_draw.shape )
        
    tab = []

    for i in range( 0, output_dict['num_detections'] ):
        tabbis = []
        tabbis.append( '{}'.format(output_dict['detection_scores_' + str(i)]) )
        tabbis.append( labels[ int(output_dict['detection_classes_' + str(i)]) ] )
        tabbis.append( output_dict['detection_boxes_' + str(i)][0] )
        tabbis.append( output_dict['detection_boxes_' + str(i)][1] )
        tab.append(tabbis)

        # Draw bounding boxes around valid detections 
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

        img_draw = visualize_output.draw_bounding_box( 
                       y1, x1, y2, x2, 
                       img_draw,
                       thickness=4,
                       color=(255, 255, 0),
                       display_str=display_str )

    print( "==============================================================\n" )

    if not os.path.exists('pictures'):
        os.makedirs('pictures')

    # Save the annotated image; only try to display it if a display exists
    filename = 'pictures/{}.jpg'.format(str(round(time.time() * 1000)))
    skimage.io.imsave(filename, img_draw)
    if 'DISPLAY' in os.environ:
        skimage.io.imshow( img_draw )
        skimage.io.show()
        
    return tab
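
All of these examples assume an NCS device has already been opened and a compiled graph allocated onto it. A minimal setup sketch using the NCSDK v1 API follows; the graph filename, the 300x300 input size (typical for SSD-MobileNet), and the mean/scale values are assumptions, not taken from the source:

import numpy
import skimage.io
import skimage.transform
import mvnc.mvncapi as mvnc

def open_ncs_graph(graph_path='graph'):           # assumption: blob filename
    # Find and open the first attached Neural Compute Stick
    devices = mvnc.EnumerateDevices()
    if len(devices) == 0:
        raise RuntimeError('No NCS device found')
    device = mvnc.Device(devices[0])
    device.OpenDevice()

    # Load the compiled graph blob onto the device
    with open(graph_path, mode='rb') as f:
        graph = device.AllocateGraph(f.read())
    return device, graph

def preprocess(image_path, dim=(300, 300), mean=127.5, scale=0.007843):
    # Resize and convert to the half-precision tensor the graph expects
    img = skimage.transform.resize(skimage.io.imread(image_path), dim,
                                   preserve_range=True)
    return ((img - mean) * scale).astype(numpy.float16)

When finished, release the stick with graph.DeallocateGraph() and device.CloseDevice(), as the finally block in Example #18 below does.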
Example #2
def infer_image( graph, img ):

    # Read original image, so we can perform visualization ops on it
    img_draw = skimage.io.imread( ARGS['image'] )

    # The first inference takes an additional ~20ms due to memory 
    # initializations, so we make a 'dummy forward pass'.
    graph.LoadTensor( img, 'user object' )
    output, userobj = graph.GetResult()

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    if ARGS['network'] == 'SSD':
        output_dict = deserialize_output.ssd( output, CONFIDANCE_THRESHOLD, img_draw.shape )
    elif ARGS['network'] == 'TinyYolo':
        output_dict = deserialize_output.tinyyolo( output, CONFIDANCE_THRESHOLD, img_draw.shape )

    # Print the results
    print( "\n==============================================================" )
    print( "I found these objects in", ntpath.basename( ARGS['image'] ) )
    print( "Execution time: " + str( np.sum( inference_time ) ) + "ms" )
    print( "--------------------------------------------------------------" )
    for i in range( 0, output_dict['num_detections'] ):
        print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)]
               + labels[ int(output_dict['detection_classes_' + str(i)]) ]
               + ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
               + " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )

        # Draw bounding boxes around valid detections 
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

        img_draw = visualize_output.draw_bounding_box( 
                       y1, x1, y2, x2, 
                       img_draw,
                       thickness=4,
                       color=(255, 255, 0),
                       display_str=display_str )

    print( "==============================================================\n" )

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        skimage.io.imshow( img_draw )
        skimage.io.show()
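
deserialize_output.ssd() is a helper from these projects rather than a library call, but every example indexes its result the same way, so the dictionary layout can be inferred: a 'num_detections' count plus numbered per-detection keys, each box stored as ((y1, x1), (y2, x2)) pixel coordinates. An illustrative instance (the values below are made up purely for illustration):

# Inferred shape of the dictionary returned by deserialize_output.ssd()
output_dict = {
    'num_detections': 2,
    'detection_classes_0': 15,                     # label index ('15: person')
    'detection_scores_0': 87.3,                    # confidence, in percent
    'detection_boxes_0': ((40, 10), (220, 180)),   # (y1, x1), (y2, x2)
    'detection_classes_1': 5,
    'detection_scores_1': 62.0,
    'detection_boxes_1': ((60, 200), (240, 310)),
}

for i in range(output_dict['num_detections']):
    (y1, x1), (y2, x2) = output_dict['detection_boxes_' + str(i)]
    print('class %d: %.1f%% at (%d, %d)-(%d, %d)'
          % (output_dict['detection_classes_' + str(i)],
             output_dict['detection_scores_' + str(i)], x1, y1, x2, y2))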
Example #3
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    print("I found these objects in " + " ( %.2f ms ):" %
          (numpy.sum(inference_time)))
    flag = 0

    for i in range(0, output_dict['num_detections']):
        print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
              labels[int(output_dict['detection_classes_' + str(i)])] +
              ": Top Left: " +
              str(output_dict['detection_boxes_' + str(i)][0]) +
              " Bottom Right: " +
              str(output_dict['detection_boxes_' + str(i)][1]))

        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
        if labels[output_dict.get('detection_classes_' +
                                  str(i))] == '5: bottle':
            flag = 1
            xdata1 = x1
            xdata2 = x2

        # Prep string to overlay on the image
        display_str = (labels[output_dict.get('detection_classes_' + str(i))] +
                       ": " +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        frame = visualize_output.draw_bounding_box(y1,
                                                   x1,
                                                   y2,
                                                   x2,
                                                   frame,
                                                   thickness=4,
                                                   color=(255, 255, 0),
                                                   display_str=display_str)
    print('\n')

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
    if flag == 1:
        # A bottle was found: return its horizontal offset from the centre
        return int((xdata1 + xdata2) / 2) - 288
    else:
        # No bottle detected in this frame
        return -800
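
Here the return value is the bottle's horizontal offset from the image centre (288 px, which implies a 576-px-wide frame), with -800 as a no-bottle sentinel. A hypothetical caller; stop(), turn_left() and turn_right() are placeholder motor helpers that do not appear in the source:

offset = infer_image(graph, img, frame)
if offset == -800:
    stop()           # placeholder: no bottle in view
elif offset < -20:
    turn_left()      # placeholder: bottle is left of centre
elif offset > 20:
    turn_right()     # placeholder: bottle is right of centre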
Example #4
def infer_image(graph, img, frame, fps):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if (output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON):

            #cur_time = strftime( "%Y_%m_%d_%H_%M_%S", localtime() )
            #print( "Person detected on " + cur_time )

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)

            # Capture snapshots
            #img = Image.fromarray( frame )
            #photo = ( os.path.dirname(os.path.realpath(__file__))
            #          + "/captures/photo_"
            #          + cur_time + ".jpg" )
            #img.save( photo )

    # If a display is available, show the image on which inference was performed
    #if 'DISPLAY' in os.environ:
    #    img.show()

    frame = visualize_output.draw_fps(frame, fps)
    img = Image.fromarray(frame)
    imgbytearr = io.BytesIO()
    img.save(imgbytearr, format='jpeg')
    mqttframe = imgbytearr.getvalue()
    client.publish(MQTT_TOPIC, mqttframe, 0, False)
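
Example #4 publishes each annotated frame as JPEG bytes over MQTT rather than displaying it. The receiving side is not shown; a minimal subscriber sketch using paho-mqtt, where the broker address and topic string are assumptions that must match the publisher's MQTT_TOPIC:

import io
import paho.mqtt.client as mqtt
from PIL import Image

MQTT_TOPIC = 'ncs/frames'            # assumption: must match the publisher

def on_message(client, userdata, msg):
    # Each MQTT payload is one JPEG-encoded frame
    Image.open(io.BytesIO(msg.payload)).show()

client = mqtt.Client()
client.on_message = on_message
client.connect('localhost', 1883)    # assumption: local broker
client.subscribe(MQTT_TOPIC)
client.loop_forever()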
Example #5
def infer_image( graph, img ):

    # Read original image, so we can perform visualization ops on it
    img_draw = skimage.io.imread( ARGS.image )

    # The first inference takes an additional ~20ms due to memory 
    # initializations, so we make a 'dummy forward pass'.
    graph.LoadTensor( img, 'user object' )
    output, userobj = graph.GetResult()

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd( 
                      output, 
                      CONFIDANCE_THRESHOLD, 
                      img_draw.shape )

    # Print the results
    print( "\n==============================================================" )
    print( "I found these objects in", ntpath.basename( ARGS.image ) )
    print( "Execution time: " + str( numpy.sum( inference_time ) ) + "ms" )
    print( "--------------------------------------------------------------" )
    for i in range( 0, output_dict['num_detections'] ):
        print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)]
               + labels[ int(output_dict['detection_classes_' + str(i)]) ]
               + ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
               + " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )

        # Draw bounding boxes around valid detections 
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

        img_draw = visualize_output.draw_bounding_box( 
                       y1, x1, y2, x2, 
                       img_draw,
                       thickness=4,
                       color=(255, 255, 0),
                       display_str=display_str )

    print( "==============================================================\n" )

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        skimage.io.imshow( img_draw )
        skimage.io.show()
Example #6
def ssd_infer_image(tensor, image_name):
    original_image = skimage.io.imread(image_name)
    output, inference_time = mv.manage_NCS(graph_file, tensor)
    output_dict = deserialize_output.ssd(output, CONFIDENCE_THRESHOLD,
                                         original_image.shape)

    # Compile results
    results = "SSD Object Detection results:\n\n This image contains:\n"

    for i in range(0, output_dict['num_detections']):
        results = results + str(output_dict['detection_scores_' + str(i)]) + "% confidence it could be a " + \
                  labels[int(output_dict['detection_classes_'   + str(i)])][3:] + "\n"

        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = (labels[output_dict.get('detection_classes_' + str(i))] +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        original_image = visualize_output.draw_bounding_box(
            y1,
            x1,
            y2,
            x2,
            original_image,
            thickness=6,
            color=(255, 255, 0),
            display_str=display_str)
    results = results + "\n\nExecution time: " + str(
        int(numpy.sum(inference_time))) + " ms"

    return original_image, results
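
Unlike the other examples, this one returns the annotated image and a results string instead of displaying them, which suits a GUI or web front end. A minimal usage sketch; 'test.jpg' is a placeholder and preprocess() refers to the setup sketch after Example #1:

import skimage.io

tensor = preprocess('test.jpg')
image, results = ssd_infer_image(tensor, 'test.jpg')
skimage.io.imsave('annotated.jpg', image)
print(results)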
Example #7
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    global lastcount
    count = 0
    selected = 'bottle'

    for i in range(0, output_dict['num_detections']):
        print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
              labels[int(output_dict['detection_classes_' + str(i)])] +
              ": Top Left: " +
              str(output_dict['detection_boxes_' + str(i)][0]) +
              " Bottom Right: " +
              str(output_dict['detection_boxes_' + str(i)][1]))
        if str(selected) in labels[int(output_dict['detection_classes_' +
                                                   str(i)])]:
            count = count + 1
            # Draw bounding boxes around valid detections
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)
    if lastcount != count and selected != '':
        datastring = 'Item=' + selected + "&Count=" + str(count)
        file = open('./bottles.txt', 'w')
        file.write(datastring)
        file.close()
        lastcount = count
    print(count)
    print('\n')

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
Example #8
def infer_image( graph, img, frame ):

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd( 
                      output, 
                      CONFIDANCE_THRESHOLD, 
                      frame.shape )

    # Print the results (each image/frame may have multiple objects)
    #print( "I found these objects in "
            #+ " ( %.2f ms ):" % ( numpy.sum( inference_time ) ) )
    global lastcount
    count = 0
    for i in range( 0, output_dict['num_detections'] ):
        print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)] 
               + labels[ int(output_dict['detection_classes_' + str(i)]) ]
               + ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
               + " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )
        if 'person' in labels[ int(output_dict['detection_classes_' + str(i)]) ]:
            count = count + 1
            # Draw bounding boxes around valid detections 
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Prep string to overlay on the image
            display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

            frame = visualize_output.draw_bounding_box( 
                       y1, x1, y2, x2, 
                       frame,
                       thickness=4,
                       color=(255, 255, 0),
                       display_str=display_str )
    if lastcount != count:
        file = open('../node_server/client/people.txt','w')
        file.write(str(count)) 
        file.close() 

    lastcount = count
    print( str(count) + ' people\n' )

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow( 'NCS live inference', frame )
Example #9
def infer_image(graph, img, frame):
    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    output_str = "\n==============================================================\n"
    output_str += "Execution time: " + "%.1fms" % (
        numpy.sum(inference_time)) + "\n"
    for i in range(0, output_dict['num_detections']):
        output_str += "%3.1f%%\t" % output_dict['detection_scores_' + str(i)]
        output_str += labels[int(output_dict['detection_classes_' + str(i)])]
        output_str += ": Top Left: " + str(
            output_dict['detection_boxes_' + str(i)][0])
        output_str += " Bottom Right: " + str(
            output_dict['detection_boxes_' + str(i)][1])
        output_str += "\n"

        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        label = labels[output_dict.get('detection_classes_' + str(i))]
        labelsArray = label.split(":")
        if len(labelsArray) == 2:
            label = labelsArray[1]

        # Prep string to overlay on the image
        display_str = (label + ": " +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        frame = visualize_output.draw_bounding_box(y1,
                                                   x1,
                                                   y2,
                                                   x2,
                                                   frame,
                                                   thickness=2,
                                                   fontsize=20,
                                                   outlineColor=(0, 255, 0),
                                                   textColor=(0, 255, 0),
                                                   display_str=display_str)

    output_str += "==============================================================\n"

    print(output_str)

    return frame
Example #10
def infer_image( graph, img, frame ):

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd( 
                      output, 
                      CONFIDANCE_THRESHOLD, 
                      frame.shape )

    # Print the results (each image/frame may have multiple objects)
    for i in range( 0, output_dict['num_detections'] ):

        # Filter a specific class/category
        if( output_dict.get( 'detection_classes_' + str(i) ) == CLASS_PERSON ):

            cur_time = strftime( "%Y_%m_%d_%H_%M_%S", localtime() )
            print( "Person detected on " + cur_time )

            # Extract top-left & bottom-right coordinates of detected objects 
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Prep string to overlay on the image
            display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box( 
                        y1, x1, y2, x2, 
                        frame,
                        thickness=4,
                        color=(255, 255, 0),
                        display_str=display_str )

            # Capture snapshots
            img = Image.fromarray( frame )
            photo = ( os.path.dirname(os.path.realpath(__file__))
                      + "/captures/photo_"
                      + cur_time + ".jpg" )
            img.save( photo )

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        img.show()
Example #11
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    #print("the shape of the frame is "+str(frame.shape))
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    #print( "I found these objects in "
    #       + " ( %.2f ms ):" % ( numpy.sum( inference_time ) ) )

    for i in range(0, output_dict['num_detections']):
        if (i > 0):
            f.write(" and ")
        f.write("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
                labels[int(output_dict['detection_classes_' + str(i)])] +
                ": Top Left: " +
                str(output_dict['detection_boxes_' + str(i)][0]) +
                " Bottom Right: " +
                str(output_dict['detection_boxes_' + str(i)][1]))
        #f.write('\n')

        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = (labels[output_dict.get('detection_classes_' + str(i))] +
                       ": " +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        frame = visualize_output.draw_bounding_box(y1,
                                                   x1,
                                                   y2,
                                                   x2,
                                                   frame,
                                                   thickness=4,
                                                   color=(255, 255, 0),
                                                   display_str=display_str)
    f.write('\n')
    return frame
Example #12
def infer_image( graph, img, frame ):

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd( 
                      output, 
                      CONFIDANCE_THRESHOLD, 
                      frame.shape )

    # Print the results (each image/frame may have multiple objects)
    print( "I found these objects in "
            + " ( %.2f ms ):" % ( numpy.sum( inference_time ) ) )

    for i in range( 0, output_dict['num_detections'] ):
        print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)] 
               + labels[ int(output_dict['detection_classes_' + str(i)]) ]
               + ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
               + " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )

        # Draw bounding boxes around valid detections 
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

        frame = visualize_output.draw_bounding_box( 
                       y1, x1, y2, x2, 
                       frame,
                       thickness=4,
                       color=(255, 255, 0),
                       display_str=display_str )
    print( '\n' )

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow( 'NCS live inference', frame )
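
Example #12 is the canonical per-frame body for live inference; the capture loop that drives it is not shown. A plausible sketch, assuming camera index 0, a 300x300 SSD input, and a graph allocated as in the setup sketch after Example #1 (all assumptions):

import cv2
import numpy

camera = cv2.VideoCapture(0)                 # assumption: first camera
try:
    while True:
        ret, frame = camera.read()
        if not ret:
            break
        # Preprocess the BGR frame into the graph's float16 input tensor
        img = (cv2.resize(frame, (300, 300)) - 127.5) * 0.007843
        infer_image(graph, img.astype(numpy.float16), frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    camera.release()
    cv2.destroyAllWindows()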
Example #13
def infer_image(graph, img, frame):
    # Get the results from NCS
    output, inference_time = mv.infer_image(graph, img)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDENCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):
        # Filter a specific class/category
        if (output_dict.get('detection_classes_' + str(i)) == BANNED_CLASS):
            print("Illegal object found!")
        cur_time = strftime("%Y_%m_%d_%H_%M_%S", localtime())

        # Extract top-left & bottom-right coordinates of detected objects
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image
        display_str = (labels[output_dict.get('detection_classes_' + str(i))] +
                       ": " +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        # Overlay bounding boxes, detection class and scores
        frame = visualize_output.draw_bounding_box(y1,
                                                   x1,
                                                   y2,
                                                   x2,
                                                   frame,
                                                   thickness=4,
                                                   color=(255, 255, 0),
                                                   display_str=display_str)

        # Capture snapshots
        photo = (os.path.dirname(os.path.realpath(__file__)) +
                 "/captures/photo_" + cur_time + ".jpg")
        cv2.imwrite(photo, frame)

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
Example #14
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if (output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON):

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
Example #15
def infer_image(movidius, graph, img, frame, labels):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, _ = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDENCE_THRESHOLD,
                                         frame.shape)

    num_detections = output_dict['num_detections']
    # print( "%i objects identified in %.1f ms" % (num_detections, numpy.sum(inference_time)))

    # publish over mqtt
    publish_detctions(num_detections, output_dict, labels)

    # If a display is available, show the image and results
    if 'DISPLAY' in os.environ:
        show_image(num_detections, output_dict, movidius, frame, labels)
Example #16
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if ((output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON) or
            (output_dict.get('detection_classes_' + str(i)) == CLASS_CAR)):

            cur_time = strftime("%Y_%m_%d_%H_%M_%S", localtime())
            print("Person detected on " + cur_time)

            # Print the results (each image/frame may have multiple objects)
            print("I found these objects in " + " ( %.2f ms ):" %
                  (numpy.sum(inference_time)))

            print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
                  labels[int(output_dict['detection_classes_' + str(i)])] +
                  ": Top Left: " +
                  str(output_dict['detection_boxes_' + str(i)][0]) +
                  " Bottom Right: " +
                  str(output_dict['detection_boxes_' + str(i)][1]))

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            #center x, y target point
            cx = x1 + (x2 - x1) / 2
            cy = y1 + (y2 - y1) / 2
            print(cx, cy)

            # Use one font for the overlay text
            if cx <= (width * 4 / 7) and cx >= (width * 3 / 7) and cy <= (
                    height * 4 / 7) and cy >= (height * 3 / 7):
                text = 'Locking'
                cv2.putText(frame, text, (300, 40), cv2.FONT_HERSHEY_SIMPLEX,
                            1, (0, 0, 255), 2, cv2.LINE_AA)
                # fire
                if ((x2 - x1) * (y2 - y1)) >= (width * height) / 4:
                    arduino.write(b'5')
                    print('fire')
                    #time.sleep(0.5)
            else:
                text = 'Searching....'
                cv2.putText(frame, text, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (0, 255, 0), 2, cv2.LINE_AA)

            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)
            '''
            # Capture snapshots
            photo = ( os.path.dirname(os.path.realpath(__file__))
                      + "/captures/photo_"
                      + cur_time + ".jpg" )
            cv2.imwrite( photo, frame )
            '''
    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:

        # Cross Line
        green = (0, 255, 0)
        red = (0, 0, 255)

        cv2.line(frame, (cross_line_w, cross_line_h - 80),
                 (cross_line_w, cross_line_h + 80), green, 2)  #line y
        cv2.line(frame, (cross_line_w - 20, cross_line_h - 5),
                 (cross_line_w - 20, cross_line_h + 5), green, 2)  #'left
        cv2.line(frame, (cross_line_w + 20, cross_line_h - 5),
                 (cross_line_w + 20, cross_line_h + 5), green, 2)  #'right
        cv2.line(frame, (cross_line_w - 40, cross_line_h - 5),
                 (cross_line_w - 40, cross_line_h + 5), green, 2)  #''left
        cv2.line(frame, (cross_line_w + 40, cross_line_h - 5),
                 (cross_line_w + 40, cross_line_h + 5), green, 2)  #''right
        cv2.line(frame, (cross_line_w - 60, cross_line_h - 5),
                 (cross_line_w - 60, cross_line_h + 5), green, 2)  #'''left
        cv2.line(frame, (cross_line_w + 60, cross_line_h - 5),
                 (cross_line_w + 60, cross_line_h + 5), green, 2)  #'''right

        cv2.line(frame, (cross_line_w - 80, cross_line_h),
                 (cross_line_w + 80, cross_line_h), green, 2)  #line x
        cv2.line(frame, (cross_line_w - 5, cross_line_h - 20),
                 (cross_line_w + 5, cross_line_h - 20), green, 2)  #'up
        cv2.line(frame, (cross_line_w - 5, cross_line_h + 20),
                 (cross_line_w + 5, cross_line_h + 20), green, 2)  #'down
        cv2.line(frame, (cross_line_w - 5, cross_line_h - 40),
                 (cross_line_w + 5, cross_line_h - 40), green, 2)  #''up
        cv2.line(frame, (cross_line_w - 5, cross_line_h + 40),
                 (cross_line_w + 5, cross_line_h + 40), green, 2)  #''down
        cv2.line(frame, (cross_line_w - 5, cross_line_h - 60),
                 (cross_line_w + 5, cross_line_h - 60), green, 2)  #'''up
        cv2.line(frame, (cross_line_w - 5, cross_line_h + 60),
                 (cross_line_w + 5, cross_line_h + 60), green, 2)  #'''down

        cv2.imshow('NCS live inference', frame)
Example #17
def infer_image_fps(graph, img, frame, fps):
    global direction
    global counter
    global plotbuff
    a = []
    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    elapsedtime = time.time() - starttime  # used by the f.write() log below
    # Print the results (each image/frame may have multiple objects)
    #    print( "I found these objects in ( %.2f ms ):" % ( numpy.sum( inference_time ) ) )
    inftime = numpy.sum(inference_time)
    numobj = (output_dict['num_detections'])

    # create array for detected obj
    a = [[] for _ in range(numobj)]

    #    print (numobj)
    cpu = psutil.cpu_percent()

    for i in range(0, output_dict['num_detections']):
        print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
              labels[int(output_dict['detection_classes_' + str(i)])] +
              ": Top Left: " +
              str(output_dict['detection_boxes_' + str(i)][0]) +
              " Bottom Right: " +
              str(output_dict['detection_boxes_' + str(i)][1]))
        #        print(str(i))
        a[i].append(output_dict['detection_scores_' + str(i)])
        a[i].append(labels[int(output_dict['detection_classes_' + str(i)])])
        a[i].append(str(output_dict['detection_boxes_' + str(i)][0]))
        a[i].append(str(output_dict['detection_boxes_' + str(i)][1]))
        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
        # spencer: moving direction...
        avgx = x2 - x1
        avgy = y2 - y1
        prevavg = (y1, x1)

        plotbuff.appendleft(prevavg)

        for j in np.arange(1, len(plotbuff)):
            if plotbuff[j - 1] is None or plotbuff[j] is None:
                continue
            if counter >= 4 and j == 1 and len(plotbuff) == PLOTBUFFSIZE:
                #                dX = plotbuff[4][0] - plotbuff[j][0]
                dY = plotbuff[j][0] - plotbuff[4][0]
                #                dY = plotbuff[4][1] - plotbuff[j][1]
                dX = plotbuff[j][1] - plotbuff[4][1]
                (dirY, dirX) = ("", "")
                #                print((dY,dX))

                if np.abs(dX) > 10:
                    dirX = "EAST" if np.sign(dX) == 1 else "WEST"

                if np.abs(dY) > 20:
                    dirY = "SOUTH" if np.sign(dY) == 1 else "NORTH"

                if dirX != "" and dirY != "":
                    direction = "{}-{}".format(dirY, dirX)

                else:
                    direction = dirX if dirX != "" else dirY

        print(direction)
        print(avgx)
        if direction == "EAST" or direction == "EAST-SOUTH" or direction == "EAST-NORTH":
            if avgx >= width - 200:
                print("{} MOVING EAST OUT OF THE SCENE", format(avgx))
        if direction == "WEST" or direction == "WEST-SOUTH" or direction == "WEST-NORTH":
            if avgx <= 200:
                print("{} MOVING WEST OUT OF THE SCENE", format(avgx))

        # Prep string to overlay on the image
        display_str = (labels[output_dict.get('detection_classes_' + str(i))] +
                       ": " +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        frame = visualize_output.draw_bounding_box(y1,
                                                   x1,
                                                   y2,
                                                   x2,
                                                   frame,
                                                   thickness=4,
                                                   color=(255, 255, 0),
                                                   display_str=display_str)
        cv2.putText(frame, 'FPS:' + str(fps), (10, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2,
                    cv2.LINE_AA)
        cv2.putText(frame, direction, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.65,
                    (0, 0, 255), 3)
#    print( '\n' )

# If a display is available, show the image on which inference was performed
    counter += 1
    if displayit == "on":
        cv2.imshow('NCS live inference', frame)

    # need to log here.
#    print (elapsedtime)
# for tracking one human...
    f.write(
        str("{0:.2f}".format(elapsedtime)) + '\t' + str(cpu) + '\t' +
        str("{0:.2f}".format(inftime)) + '\t' + str("{0:.2f}".format(fps)) +
        '\t' + str(numobj) + '\t' + str(direction) + '\t' + str(a) + '\n')
    #    print(a)
    # need plots...! for multiple objects
    del a
Example #18
    # (This example begins mid-statement; the line below is the tail of what
    # appears to be a skimage.transform.resize(...) call.)
                                     preserve_range=True)
    input = input[:, :, ::-1]  # RGB -> BGR
    input = input.astype(np.float16)
    input = (input - np.float16(MEAN)) * SCALE  # subtract mean & scale

    # warm start with a dummy forward pass
    graph.LoadTensor(input, 'user object')
    output, userobj = graph.GetResult()

    # actual timed inference
    graph.LoadTensor(input, 'user object')
    output, userobj = graph.GetResult()
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # deserialize and print output
    output_dict = deserialize_output.ssd(output, CONFIDENCE_THRESHOLD,
                                         input.shape)

    print("Execution time: " + str(np.sum(inference_time)) + "ms")
    for i in range(0, output_dict['num_detections']):
        print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
              labels[int(output_dict['detection_classes_' + str(i)])] +
              ": Top Left: " +
              str(output_dict['detection_boxes_' + str(i)][0]) +
              " Bottom Right: " +
              str(output_dict['detection_boxes_' + str(i)][1]))

finally:
    # cleanup
    graph.DeallocateGraph()
    movidius.CloseDevice()
Example #19
def infer_image(graph, img, frame, motor, pid):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    # print( "I found these objects in "
    #        + " ( %.2f ms ):" % ( numpy.sum( inference_time ) ) )

    count_person = 0
    for i in range(0, output_dict['num_detections']):
        # Only interested in person.
        if labels[output_dict['detection_classes_' + str(i)]] != "15: person":
            continue
        if count_person > 0:
            continue
        count_person = count_person + 1
        #print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)]
        #       + labels[ int(output_dict['detection_classes_' + str(i)]) ]
        #       + ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
        #       + " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )

        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
        mid = (x1 + x2) / 2
        dis = 160 - mid
        pid.update(dis)
        print("distance: %d " % dis, end='', flush=True)
        if pid.output > 30:
            motor.direction = int(numpy.interp(pid.output, [0, 160], [1, 100]))
            print("direction: %d\n" % motor.direction)
        elif pid.output < -30:
            motor.direction = int(
                numpy.interp(pid.output, [-160, 0], [-100, -1]))
            print("direction: %d\n" % motor.direction)
        else:
            motor.direction = 0

        # Prep string to overlay on the image
        #display_str = (
        #        labels[output_dict.get('detection_classes_' + str(i))]
        #        + ": "
        #        + str( output_dict.get('detection_scores_' + str(i) ) )
        #        + "%" )

        #frame = visualize_output.draw_bounding_box(
        #               y1, x1, y2, x2,
        #               frame,
        #               thickness=4,
        #               color=(255, 255, 0),
        #               display_str=display_str )

    if count_person == 0:
        motor.direction = 0
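
The motor-steering variant above only calls pid.update(dis) and then reads pid.output, so any PID object with that interface will do. A minimal sketch of such a controller; the gains are placeholders, not values from the source:

import time

class PID:
    def __init__(self, kp=1.0, ki=0.0, kd=0.1):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.output = 0.0
        self._integral = 0.0
        self._last_error = 0.0
        self._last_time = time.time()

    def update(self, error):
        # Discrete PID step; the result is stored on the instance,
        # matching how infer_image() reads pid.output afterwards.
        now = time.time()
        dt = max(now - self._last_time, 1e-6)
        self._integral += error * dt
        derivative = (error - self._last_error) / dt
        self.output = (self.kp * error
                       + self.ki * self._integral
                       + self.kd * derivative)
        self._last_error = error
        self._last_time = now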
Example #20
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if ((output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON) or
            (output_dict.get('detection_classes_' + str(i)) == CLASS_CAR) or
            (output_dict.get('detection_classes_' + str(i)) == CLASS_FACE)):

            cur_time = strftime("%Y_%m_%d_%H_%M_%S", localtime())
            print("Person detected on " + cur_time)

            # Print the results (each image/frame may have multiple objects)
            print("I found these objects in " + " ( %.2f ms ):" %
                  (numpy.sum(inference_time)))

            print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
                  labels[int(output_dict['detection_classes_' + str(i)])] +
                  ": Top Left: " +
                  str(output_dict['detection_boxes_' + str(i)][0]) +
                  " Bottom Right: " +
                  str(output_dict['detection_boxes_' + str(i)][1]))

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            #center x, y target point of object box
            cx = x1 + (x2 - x1) / 2
            cy = y1 + (y2 - y1) / 2
            print(cx, cy)
            # Print the target number on the terminal
            print('#' + str(i + 1))
            # Track the two highest-confidence targets, #1 or #2
            if (i + 1) == 1 or (i + 1) == 2:
                #servo motor
                if cx < width * 3 / 7:
                    arduino.write(b'4')
                    print('4')
                if cx < width * 2 / 7:
                    arduino.write(b'4')
                    print('44')
                if cx < width * 1 / 7:
                    arduino.write(b'44')
                    print('444')
                if cx > width * 4 / 7:
                    arduino.write(b'6')
                    print('6')
                if cx > width * 5 / 7:
                    arduino.write(b'6')
                    print('66')
                if cx > width * 6 / 7:
                    arduino.write(b'66')
                    print('666')

                if cy < height * 3 / 7:
                    arduino.write(b'2')
                    print('2')
                if cy < height * 2 / 7:
                    arduino.write(b'2')
                    print('22')
                if cy < height * 1 / 7:
                    arduino.write(b'22')
                    print('222')

                if cy > height * 4 / 7:
                    arduino.write(b'8')
                    print('8')
                if cy > height * 5 / 7:
                    arduino.write(b'8')
                    print('88')
                if cy > height * 6 / 7:
                    arduino.write(b'88')
                    print('888')
            '''
            #servo motor                      
            if cx < width*3 / 7 :
                 arduino.write(b'4')
                 print ('4')
            if cx < width*2 / 7 :
                 arduino.write(b'4')
                 print ('44')
            if cx < width*1 / 7 :
                 arduino.write(b'44')
                 print ('444')
            if cx > width*4 / 7 :
                 arduino.write(b'6')
                 print ('6')
            if cx > width*5 / 7 :
                 arduino.write(b'6')
                 print ('66')
            if cx > width*6 / 7 :
                 arduino.write(b'66')
                 print ('666')

            if cy < height*3 / 7 :
                 arduino.write(b'2')
                 print ('2')
            if cy < height*2 / 7 :
                 arduino.write(b'2')
                 print ('22')
            if cy < height*1 / 7 :
                 arduino.write(b'22')
                 print ('222')

            if cy > height*4 / 7 :
                 arduino.write(b'8')
                 print ('8')
            if cy > height*5 / 7 :
                 arduino.write(b'8')
                 print ('88')
            if cy > height*6 / 7 :
                 arduino.write(b'88')
                 print ('888')
            '''
            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)
            # Show the target number next to its box (numbered by descending confidence)
            cv2.putText(frame, '#' + str(i + 1), (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)
            # Use one font; fire or capture snapshots
            if cx <= (width * 4 / 7) and cx >= (width * 3 / 7) and cy <= (
                    height * 4 / 7) and cy >= (height * 3 / 7):
                text = 'Locking'
                cv2.putText(frame, text, (300, 40), cv2.FONT_HERSHEY_SIMPLEX,
                            1, (0, 0, 255), 2, cv2.LINE_AA)
                # show targets number
                cv2.putText(frame, 'Targets:' + str(i + 1), (10, 100),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                            cv2.LINE_AA)
                # Fire on targets close to the unit (high threat) and on target #1
                if ((x2 - x1) *
                    (y2 - y1)) >= (width * height) / 3 and (i + 1) == 1:
                    arduino.write(b'5')
                    print('fire')
                    text = 'FIRE'
                    cv2.putText(frame, text, (500, 40),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2,
                                cv2.LINE_AA)

                    # Cross Line in snapshots

                    green = (0, 255, 0)
                    red = (0, 0, 255)
                    cv2.line(frame, (cross_line_w, cross_line_h - 80),
                             (cross_line_w, cross_line_h + 80), green,
                             2)  #line y
                    cv2.line(frame, (cross_line_w - 20, cross_line_h - 5),
                             (cross_line_w - 20, cross_line_h + 5), green,
                             2)  #,left
                    cv2.line(frame, (cross_line_w + 20, cross_line_h - 5),
                             (cross_line_w + 20, cross_line_h + 5), green,
                             2)  #,right
                    cv2.line(frame, (cross_line_w - 40, cross_line_h - 5),
                             (cross_line_w - 40, cross_line_h + 5), green,
                             2)  #,,left
                    cv2.line(frame, (cross_line_w + 40, cross_line_h - 5),
                             (cross_line_w + 40, cross_line_h + 5), green,
                             2)  #,,right
                    cv2.line(frame, (cross_line_w - 60, cross_line_h - 5),
                             (cross_line_w - 60, cross_line_h + 5), green,
                             2)  #,,,left
                    cv2.line(frame, (cross_line_w + 60, cross_line_h - 5),
                             (cross_line_w + 60, cross_line_h + 5), green,
                             2)  #,,,right
                    cv2.line(frame, (cross_line_w - 80, cross_line_h),
                             (cross_line_w + 80, cross_line_h), green,
                             2)  #line x
                    cv2.line(frame, (cross_line_w - 5, cross_line_h - 20),
                             (cross_line_w + 5, cross_line_h - 20), green,
                             2)  #,up
                    cv2.line(frame, (cross_line_w - 5, cross_line_h + 20),
                             (cross_line_w + 5, cross_line_h + 20), green,
                             2)  #,down
                    cv2.line(frame, (cross_line_w - 5, cross_line_h - 40),
                             (cross_line_w + 5, cross_line_h - 40), green,
                             2)  #,,up
                    cv2.line(frame, (cross_line_w - 5, cross_line_h + 40),
                             (cross_line_w + 5, cross_line_h + 40), green,
                             2)  #,,down
                    cv2.line(frame, (cross_line_w - 5, cross_line_h - 60),
                             (cross_line_w + 5, cross_line_h - 60), green,
                             2)  #,,,up
                    cv2.line(frame, (cross_line_w - 5, cross_line_h + 60),
                             (cross_line_w + 5, cross_line_h + 60), green,
                             2)  #,,,down

                    # Capture snapshots
                    photo = (os.path.dirname(os.path.realpath(__file__)) +
                             "/captures/photo_" + cur_time + ".jpg")
                    cv2.imwrite(photo, frame)

            else:
                text = 'Searching....'
                cv2.putText(frame, text, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (0, 255, 0), 2, cv2.LINE_AA)
                # show targets number
                cv2.putText(frame, 'Targets:' + str(i + 1), (10, 100),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                            cv2.LINE_AA)

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:

        # Crosshair overlay: main axes plus tick marks every 20 px
        green = (0, 255, 0)
        cv2.line(frame, (cross_line_w, cross_line_h - 80),
                 (cross_line_w, cross_line_h + 80), green, 2)  # vertical axis
        cv2.line(frame, (cross_line_w - 80, cross_line_h),
                 (cross_line_w + 80, cross_line_h), green, 2)  # horizontal axis
        for offset in (-60, -40, -20, 20, 40, 60):
            # ticks along the horizontal axis
            cv2.line(frame, (cross_line_w + offset, cross_line_h - 5),
                     (cross_line_w + offset, cross_line_h + 5), green, 2)
            # ticks along the vertical axis
            cv2.line(frame, (cross_line_w - 5, cross_line_h + offset),
                     (cross_line_w + 5, cross_line_h + offset), green, 2)

        cv2.imshow('NCS live inference', frame)
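The crosshair overlay above is drawn twice (once into the snapshot, once for the live display). A minimal sketch of a reusable helper with the same geometry; draw_crosshair is a hypothetical name, not part of the original example:

def draw_crosshair(frame, cx, cy, color=(0, 255, 0), thickness=2):
    # Main axes, 80 px long in each direction from the center
    cv2.line(frame, (cx, cy - 80), (cx, cy + 80), color, thickness)
    cv2.line(frame, (cx - 80, cy), (cx + 80, cy), color, thickness)
    # Tick marks every 20 px along both axes
    for offset in (-60, -40, -20, 20, 40, 60):
        cv2.line(frame, (cx + offset, cy - 5), (cx + offset, cy + 5),
                 color, thickness)
        cv2.line(frame, (cx - 5, cy + offset), (cx + 5, cy + offset),
                 color, thickness)

Both drawing sites could then call draw_crosshair(frame, cross_line_w, cross_line_h).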
Ejemplo n.º 21
0
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if ((output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON) or
            (output_dict.get('detection_classes_' + str(i)) == CLASS_CAR)):

            cur_time = strftime("%Y_%m_%d_%H_%M_%S", localtime())
            print("Person detected on " + cur_time)

            # Print the results (each image/frame may have multiple objects)
            print("I found these objects in " + " ( %.2f ms ):" %
                  (numpy.sum(inference_time)))

            print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
                  labels[int(output_dict['detection_classes_' + str(i)])] +
                  ": Top Left: " +
                  str(output_dict['detection_boxes_' + str(i)][0]) +
                  " Bottom Right: " +
                  str(output_dict['detection_boxes_' + str(i)][1]))

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            #center x, y target point of object box
            cx = x1 + (x2 - x1) / 2
            cy = y1 + (y2 - y1) / 2
            print(cx, cy)
            # print the target number to the terminal
            print('#' + str(i + 1))

            # track target #1 (the detection with the highest confidence)
            if (i + 1) == 1:
                # servo motor: pan commands 'a'..'i' by horizontal zone
                if cx < width * 5 / 10 and cx > width * 4 / 10:
                    arduino.write(b'e')
                    print('e')

                if cx < width * 4 / 10 and cx > width * 3 / 10:
                    arduino.write(b'd')
                    print('d')
                if cx < width * 3 / 10 and cx > width * 2 / 10:
                    arduino.write(b'c')
                    print('c')
                if cx < width * 2 / 10 and cx > width / 10:
                    arduino.write(b'b')
                    print('b')
                if cx < width / 10:
                    arduino.write(b'a')
                    print('a')

                if cx > width * 6 / 10 and cx < width * 7 / 10:
                    arduino.write(b'f')
                    print('f')
                if cx > width * 7 / 10 and cx < width * 8 / 10:
                    arduino.write(b'g')
                    print('g')
                if cx > width * 8 / 10 and cx < width * 9 / 10:
                    arduino.write(b'h')
                    print('h')
                if cx > width * 9 / 10:
                    arduino.write(b'i')
                    print('i')

                # tilt commands 'j'..'r' by vertical zone
                if cy < height * 5 / 10 and cy > height * 4 / 10:
                    arduino.write(b'n')
                    print('n')
                if cy < height * 4 / 10 and cy > height * 3 / 10:
                    arduino.write(b'm')
                    print('m')
                if cy < height * 3 / 10 and cy > height * 2 / 10:
                    arduino.write(b'l')
                    print('l')
                if cy < height * 2 / 10 and cy > height / 10:
                    arduino.write(b'k')
                    print('k')
                if cy < height / 10:
                    arduino.write(b'j')
                    print('j')

                if cy > height * 6 / 10 and cy < height * 7 / 10:
                    arduino.write(b'o')
                    print('o')
                if cy > height * 7 / 10 and cy < height * 8 / 10:
                    arduino.write(b'p')
                    print('p')
                if cy > height * 8 / 10 and cy < height * 9 / 10:
                    arduino.write(b'q')
                    print('q')
                if cy > height * 9 / 10:
                    arduino.write(b'r')
                    print('r')

            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)
            # show the target number next to its bounding box
            # (numbers ordered by descending detection confidence)
            cv2.putText(frame, '#' + str(i + 1), (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)
            cv2.putText(frame, 'Targets:' + str(i + 1), (10, 100),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)
            # fire when target #1's box covers at least 1/4 of the frame
            if ((x2 - x1) *
                (y2 - y1)) >= (width * height) / 4 and (i + 1) == 1:
                arduino.write(b'5')
                print('fire')
                text = 'FIRE'
                cv2.putText(frame, text, (500, 40), cv2.FONT_HERSHEY_SIMPLEX,
                            1, (0, 0, 255), 2, cv2.LINE_AA)

                # Capture snapshots
                photo = (os.path.dirname(os.path.realpath(__file__)) +
                         "/captures/photo_" + cur_time + ".jpg")
                cv2.imwrite(photo, frame)
    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
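The nested if-chains above map the target center into tenth-of-frame zones, with a dead band around the center. A table-driven sketch of the same mapping (zone_code, X_CODES, and Y_CODES are hypothetical names; unlike the strict inequalities above, zone boundaries here fall into the lower zone):

# Pan codes left-to-right, tilt codes top-to-bottom; None is the dead band
X_CODES = [b'a', b'b', b'c', b'd', b'e', None, b'f', b'g', b'h', b'i']
Y_CODES = [b'j', b'k', b'l', b'm', b'n', None, b'o', b'p', b'q', b'r']

def zone_code(c, extent, codes):
    # Map a coordinate to its tenth-of-frame zone command
    return codes[min(int(c * 10 // extent), 9)]

for code in (zone_code(cx, width, X_CODES), zone_code(cy, height, Y_CODES)):
    if code is not None:
        arduino.write(code)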
Ejemplo n.º 22
0
def infer_image_fps(graph, img, frame, fps):
    global counter
    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Elapsed time since capture started (logged to redis below);
    # assumes a global `starttime` set at startup, as in the original
    elapsedtime = time.time() - starttime

    inftime = numpy.sum(inference_time)
    numobj = output_dict['num_detections']

    # One row of metadata per detected object
    a = [[] for _ in range(numobj)]

    cpu = psutil.cpu_percent()

    for i in range(0, output_dict['num_detections']):
        print("%3.1f%%\t" % output_dict['detection_scores_' + str(i)] +
              labels[int(output_dict['detection_classes_' + str(i)])] +
              ": Top Left: " +
              str(output_dict['detection_boxes_' + str(i)][0]) +
              " Bottom Right: " +
              str(output_dict['detection_boxes_' + str(i)][1]))
        #        print(str(i))
        a[i].append(output_dict['detection_scores_' + str(i)])
        a[i].append(labels[int(output_dict['detection_classes_' + str(i)])])
        a[i].append(str(output_dict['detection_boxes_' + str(i)][0]))
        a[i].append(str(output_dict['detection_boxes_' + str(i)][1]))
        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

        # Prep string to overlay on the image

        display_str = (labels[output_dict.get('detection_classes_' + str(i))] +
                       ": " +
                       str(output_dict.get('detection_scores_' + str(i))) +
                       "%")

        frame = visualize_output.draw_bounding_box(y1,
                                                   x1,
                                                   y2,
                                                   x2,
                                                   frame,
                                                   thickness=4,
                                                   color=(255, 255, 0),
                                                   display_str=display_str)
        cv2.putText(frame, 'FPS:' + str(fps), (10, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2,
                    cv2.LINE_AA)
    # Count this frame and, if display is enabled, show the annotated image
    counter += 1
    if displayit == "on":
        cv2.imshow('NCS live inference', frame)

    # Save per-frame metrics to redis, keyed by the frame counter
    save = {
        "elapsedtime": "{0:.2f}".format(elapsedtime),
        "CPU": str(cpu),
        "inftime": str("{0:.2f}".format(inftime)),
        "fps": str("{0:.2f}".format(fps)),
        "numberofobjects": str(numobj),
        "a": str(a)
    }

    r.hmset(counter, save)
    print(r.hgetall(counter))
    # TODO: plots for multiple objects
    del a
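The per-frame metrics saved above can be read back from redis for analysis. A minimal sketch, assuming the same connection r and the integer frame-counter keys used above, starting from 1 (mean_fps is a hypothetical helper):

def mean_fps(r, last_counter):
    # Average the logged FPS field across all recorded frames
    vals = [float(r.hget(k, 'fps')) for k in range(1, last_counter + 1)]
    return sum(vals) / len(vals) if vals else 0.0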
Ejemplo n.º 23
0
def infer_image(graph, img, frame):

    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if (output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON):

            # Time
            d = datetime.now()
            cur_time = strftime("%Y_%m_%d_%H_%M_%S", localtime())
            print("Person detected on " + cur_time)

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")

            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)

            # Capture snapshots
            photo = (os.path.dirname(os.path.realpath(__file__)) +
                     "/captures/photo_" + cur_time + ".jpg")

            cv2.imwrite(photo, frame)

            # IFTTT notification, throttled to one every MINTIME seconds
            global d_last
            d_diff = d - d_last

            # total_seconds() counts whole days, unlike timedelta.seconds
            if (d_diff.total_seconds() > MINTIME):
                print('\033[31m' + 'Send Notification to IFTTT --- ' +
                      '\033[0m')  # Red text
                r = requests.post(
                    'https://maker.ifttt.com/trigger/rasp_seccam_triggered/with/key/c_6oKb50WdIWAaelvo3EINQ8ZU9ibwxNFJiBV1phPuh',
                    params={
                        "value1": cur_time,
                        "value2": photo,
                        "value3": "none"
                    })
                os.system("rclone copy " + photo + " gdrive:rclone")
                d_last = d

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
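The time check above is a simple debounce. A sketch of it factored into a testable helper (should_notify is a hypothetical name); total_seconds() matters here because timedelta.seconds ignores whole days:

def should_notify(now, last, min_gap_seconds):
    # True once at least min_gap_seconds have passed since the last alert
    return (now - last).total_seconds() > min_gap_seconds

The call site would then read: if should_notify(d, d_last, MINTIME): ...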
Ejemplo n.º 24
0
def infer_image(graph, img, frame):
    #from pySX127x_master.Tegwyns_LoRa_Beacon import LoRaBeacon
    #from pySX127x_master import Tegwyns_LoRa_Beacon
    # Load the image as a half-precision floating point array
    graph.LoadTensor(img, 'user object')

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption(mvnc.GraphOption.TIME_TAKEN)

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd(output, CONFIDANCE_THRESHOLD,
                                         frame.shape)

    # Print the results (each image/frame may have multiple objects)
    for i in range(0, output_dict['num_detections']):

        # Filter a specific class/category
        if (output_dict.get('detection_classes_' + str(i)) == CLASS_PERSON):

            cur_time = strftime("%Y_%m_%d_%H_%M_%S", localtime())
            print("Person detected on " + cur_time)
            print(".... Press q to quit ..... ")

            # Extract top-left & bottom-right coordinates of detected objects
            (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
            (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
            #print (y1, x1)
            # Prep string to overlay on the image
            display_str = (
                labels[output_dict.get('detection_classes_' + str(i))] + ": " +
                str(output_dict.get('detection_scores_' + str(i))) + "%")
            print(display_str)
            print(y1, x1)
            print(y2, x2)
            # Overlay bounding boxes, detection class and scores
            frame = visualize_output.draw_bounding_box(y1,
                                                       x1,
                                                       y2,
                                                       x2,
                                                       frame,
                                                       thickness=4,
                                                       color=(255, 255, 0),
                                                       display_str=display_str)
            global myString
            myString = "{} , ({},{}),({},{})".format(
                display_str, y1, x1, y2, x2)

            # Transmit the detection string over LoRa
            lora.start()

            # Capture snapshots
            photo = (os.path.dirname(os.path.realpath(__file__)) +
                     "/captures/photo_" + cur_time + ".jpg")
            cv2.imwrite(photo, frame)

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow('NCS live inference', frame)
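The myString payload above is an ad-hoc string; a structured payload is easier to parse on the LoRa receiver. A sketch, assuming the receiver is changed to expect JSON (the field names are hypothetical):

import json

payload = json.dumps({
    'label': display_str,                 # e.g. 'person: 97.1%'
    'top_left': [int(y1), int(x1)],
    'bottom_right': [int(y2), int(x2)],
    'time': cur_time,
})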
Ejemplo n.º 25
0
def infer_image( graph, img, frame, motor ):

    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )

    # Get the results from NCS
    output, userobj = graph.GetResult()

    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )

    # Deserialize the output into a python dictionary
    output_dict = deserialize_output.ssd( 
                      output, 
                      CONFIDANCE_THRESHOLD, 
                      frame.shape )

    # Print the results (each image/frame may have multiple objects)
    print( "I found these objects in "
            + " ( %.2f ms ):" % ( numpy.sum( inference_time ) ) )

    count_person = 0
    for i in range( 0, output_dict['num_detections'] ):
        # Only interested in person.
        if labels[output_dict['detection_classes_' + str(i)]] != "15: person":
            continue
        # Only track the first (highest-confidence) person
        if count_person > 0:
            continue
        count_person = count_person + 1
        print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)] 
               + labels[ int(output_dict['detection_classes_' + str(i)]) ]
               + ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
               + " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )

        # Draw bounding boxes around valid detections 
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
        print("x1 x2: %d %d\n" % (x1, x2))
        mid = (x1+x2)/2
        dis = 320 - mid
        print("distance: %d\n" % dis)
        # Drive toward the person, with a 20-px dead band around center
        if dis > 20:
            motor.direction = "forward"
        elif dis < -20:
            motor.direction = "backward"
        else:
            motor.direction = "stop"

        # Prep string to overlay on the image
        display_str = ( 
                labels[output_dict.get('detection_classes_' + str(i))]
                + ": "
                + str( output_dict.get('detection_scores_' + str(i) ) )
                + "%" )

        frame = visualize_output.draw_bounding_box( 
                       y1, x1, y2, x2, 
                       frame,
                       thickness=4,
                       color=(255, 255, 0),
                       display_str=display_str )
    print( '\n' )

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        cv2.imshow( 'NCS live inference', frame )
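The steering decision in this example can be factored out for unit testing. A minimal sketch (steer and its frame_width default are hypothetical; the original hard-codes the 320-px center):

def steer(x1, x2, frame_width=640, deadband=20):
    # Direction that re-centers the person horizontally, with a dead band
    dis = frame_width / 2 - (x1 + x2) / 2
    if dis > deadband:
        return 'forward'
    if dis < -deadband:
        return 'backward'
    return 'stop'

The loop body would then set motor.direction = steer(x1, x2).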