def main():
        print('Running NCS Caffe TinyYolo example')

        # Set logging level and initialize/open the first NCS we find
        mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 0)
        devices = mvnc.EnumerateDevices()
        if len(devices) == 0:
            print('No devices found')
            return 1
        device = mvnc.Device(devices[0])
        device.OpenDevice()

        # Load graph from disk and allocate graph via API
        # lilinwei modify code start

        tiny_yolo_graph_file = os.path.join(os.path.dirname(__file__), 'graph')
        # lilinwei modify code end
        with open(tiny_yolo_graph_file, mode='rb') as f:
            graph_from_disk = f.read()

        graph = device.AllocateGraph(graph_from_disk)

        # Read image from file, resize it to network width and height
        # save a copy in display_image for display, then convert to float32, normalize (divide by 255),
        # and finally convert to float16 to pass to LoadTensor as input for an inference
        input_image_file_ = 'data/images/photo.jpg'
        input_image_file = os.path.join(os.path.dirname(__file__),
                                        input_image_file_)
        input_image = cv2.imread(input_image_file)
        display_image = input_image
        NETWORK_IMAGE_WIDTH = 448
        NETWORK_IMAGE_HEIGHT = 448
        input_image = cv2.resize(input_image,
                                 (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT),
                                 cv2.INTER_LINEAR)
        input_image = input_image.astype(np.float32)
        input_image = np.divide(input_image, 255.0)
        input_image = input_image[:, :, ::-1]  # convert to RGB

        # Load tensor and get result.  This executes the inference on the NCS
        graph.LoadTensor(input_image.astype(np.float16), 'user object')
        output, userobj = graph.GetResult()

        # filter out all the objects/boxes that don't meet thresholds
        filtered_objs = caffe_objecet_direction.filter_objects(
            output.astype(np.float32), input_image.shape[1],
            input_image.shape[0])  # fc27 instead of fc12 for yolo_small

        print('Displaying image with objects detected in GUI')
        print('Click in the GUI window and hit any key to exit')
        # display the filtered objects/boxes in a GUI window

        # lilinwei modify code start
        # for filtered_obj in filtered_objs:
        # print(filtered_obj)
        graph.DeallocateGraph()
        device.CloseDevice()
        return filtered_objs
        # lilinwei modify code end

        # Unreachable after the early return above; kept from the original example.
        # display_objects_in_gui(display_image, filtered_objs)

        # Clean up
        # graph.DeallocateGraph()
        # device.CloseDevice()
        # print('Finished')
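
A minimal sketch of the header this snippet assumes (NCSDK v1 bindings; caffe_objecet_direction is the author's local module providing filter_objects):

import os
import cv2
import numpy as np
from mvnc import mvncapi as mvnc
import caffe_objecet_direction

if __name__ == '__main__':
    main()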
Example #2
green_LED = LED(12)  # Physical pin 32
yellow_LED = LED(16)  # Physical pin 36
red_LED = LED(21)  # Physical pin 40

#--------------------------------------------------------------------
#----------------------------NCS Setup-------------------------------
#--------------------------------------------------------------------

# Check if NCS is connected
devices = mvnc.EnumerateDevices()
if len(devices) == 0:
    print('No NCS devices found')
    quit()

# Pick the first stick to run the network
device = mvnc.Device(devices[0])

# Open the NCS
device.OpenDevice()

# The graph file that was created with the ncsdk compiler
graph_file_name = GRAPH_FILENAME

# read in the graph file to memory buffer
with open(graph_file_name, mode='rb') as f:
    graph_in_memory = f.read()

# create the NCAPI graph instance from the memory buffer containing the graph file.
graph = device.AllocateGraph(graph_in_memory)
del graph_in_memory
del f
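
A sketch of the header this fragment assumes (gpiozero uses BCM numbering, so LED(12) is physical pin 32; GRAPH_FILENAME is a placeholder):

from gpiozero import LED
from mvnc import mvncapi as mvnc

GRAPH_FILENAME = 'graph'  # compiled with the NCSDK mvNCCompile tool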
Example #3
    gNetworkMean = numpy.load(
        EXAMPLES_BASE_DIR + 'data/ilsvrc12/ilsvrc_2012_mean.npy').mean(1).mean(
            1)  #loading the mean file
    gNetworkStd = numpy.load(EXAMPLES_BASE_DIR +
                             'data/ilsvrc12/ilsvrc_2012_mean.npy').std(1)

    # Load the category labels from synset_words.txt
    labels_file = EXAMPLES_BASE_DIR + 'data/ilsvrc12/synset_words.txt'
    gNetworkCategories = numpy.loadtxt(labels_file, str, delimiter='\t')

    fx.SetGlobalOption(fx.GlobalOption.LOG_LEVEL, 2)

    # For this program we will always use the first MVNC device.
    ncs_names = fx.EnumerateDevices()
    if (len(ncs_names) < 1):
        print(
            "Error - No NCS devices detected. Make sure your device is connected."
        )
        quit()
    # Initialize the MVNC device
    dev = fx.Device(ncs_names[0])
    dev.OpenDevice()
    gGraph = dev.AllocateGraph(get_graph_from_disk())

    # Initialize input and output threads to pass images to the
    # MVNC device and to read results from the inferences made on those images.

    start_thread()
Example #4

# Entry point
if __name__ == "__main__":
    # Set the logging level for the NC API
    fx.global_set_option(fx.GlobalOption.RW_LOG_LEVEL, 0)

    # Get a list of names for all the devices plugged into the system
    devices = fx.enumerate_devices()
    if (len(devices) < 1):
        print(
            "ERROR: No NCS devices detected, verify an NCS device is connected."
        )
        quit()

    # Get the first NCS device by its name
    # For this program we will always open the first NCS device
    dev = fx.Device(devices[0])

    # Try to open the device
    # This will throw an exception if someone else has it open already
    try:
        dev.open()
    except:
        print("ERROR: Could not open NCS device.")
        quit()
    print("INFO: Hello world, NCS! Device opened normally.")

    try:
        dev.close()
    except:
        print("ERROR: could not close NCS device.")
        quit()
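
The examples in this collection mix the NCSDK v1 and v2 Python APIs. A rough correspondence between the two, for reading the snippets side by side:

# NCSDK v1                            ->  NCSDK v2
# mvnc.SetGlobalOption(...)           ->  mvnc.global_set_option(...)
# mvnc.EnumerateDevices()             ->  mvnc.enumerate_devices()
# device.OpenDevice() / CloseDevice() ->  device.open() / close() (plus device.destroy())
# device.AllocateGraph(buf)           ->  graph = mvnc.Graph(name); graph.allocate_with_fifos(device, buf)
# graph.LoadTensor(tensor, obj)       ->  graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, tensor, obj)
# graph.GetResult()                   ->  fifo_out.read_elem()
# graph.DeallocateGraph()             ->  graph.destroy() (plus fifo_in/fifo_out destroy())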
Example #5
def main():
    print('Running NCS Caffe TinyYolo example')

    # Set logging level to only log errors
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print('No devices found')
        return 1
    device = mvnc.Device(devices[0])
    device.open()

    #Load graph from disk and allocate graph via API
    with open(tiny_yolo_graph_file, mode='rb') as f:
        graph_from_disk = f.read()
    graph = mvnc.Graph("Tiny Yolo Graph")
    fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_from_disk)

    #------------------------------
    # capture
    #------------------------------
    cap = cv2.VideoCapture("cars.mp4")

    #------------------------------
    # pygame init
    #------------------------------
    pygame.init()
    screen = pygame.display.set_mode((640, 360))
    pygame.display.set_caption("TinyYolo VR")
    background = pygame.image.load("aquarium.jpg")
    player = pygame.image.load("jellyfish.png").convert_alpha()
    x_ratio = float(screen.get_width()) / NETWORK_IMAGE_WIDTH
    y_ratio = float(screen.get_height()) / NETWORK_IMAGE_HEIGHT

    while True:
        ret, frame = cap.read()
        if ret == False:
            break

        # Wait key
        key = cv2.waitKey(1)
        if key != -1:
            break

        # Resize the frame to network width and height,
        # save a copy in display_image for display, then convert to float32, normalize (divide by 255),
        # and convert BGR to RGB before queueing it as input for an inference
        #input_image = cv2.imread(input_image_file)
        input_image = frame
        display_image = input_image
        input_image = cv2.resize(input_image,
                                 (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT),
                                 cv2.INTER_LINEAR)
        input_image = input_image.astype(np.float32)
        input_image = np.divide(input_image, 255.0)
        input_image = input_image[:, :, ::-1]  # convert to RGB

        # Load tensor and get result.  This executes the inference on the NCS
        graph.queue_inference_with_fifo_elem(fifo_in, fifo_out,
                                             input_image.astype(np.float32),
                                             None)
        output, userobj = fifo_out.read_elem()

        # filter out all the objects/boxes that don't meet thresholds
        filtered_objs = filter_objects(output.astype(np.float32),
                                       input_image.shape[1],
                                       input_image.shape[0])

        print('Displaying image with objects detected in GUI')
        print('Click in the GUI window and hit any key to exit')
        #display the filtered objects/boxes in a GUI window
        display_objects_in_gui(display_image, filtered_objs)

        # Display
        cv2.imshow("window", display_image)

        #------------------------------
        # pygame draw
        #------------------------------
        #draw background
        screen.blit(background, (0, 0))

        #draw player
        for obj_index in range(len(filtered_objs)):
            center_x = int(filtered_objs[obj_index][1] * x_ratio)
            center_y = int(filtered_objs[obj_index][2] * y_ratio)
            screen.blit(player, (center_x - player.get_width() / 2,
                                 center_y - player.get_height() / 2))

        #update display
        pygame.display.flip()

    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    device.close()
    device.destroy()
    print('Finished')

    # Clean up capture
    cap.release()
    cv2.destroyAllWindows()
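
This snippet assumes the TinyYolo input dimensions, filter_objects(), and display_objects_in_gui() are defined at module level; per the other TinyYolo examples in this collection, a sketch of the assumed constants:

NETWORK_IMAGE_WIDTH = 448
NETWORK_IMAGE_HEIGHT = 448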
Example #6
def infer(imgname):
    # ***************************************************************
    # get labels
    # ***************************************************************
    labels_file = EXAMPLES_BASE_DIR + 'data/ilsvrc12/synset_words.txt'
    labels = numpy.loadtxt(labels_file, str, delimiter='\t')

    # ***************************************************************
    # configure the NCS
    # ***************************************************************
    mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)

    # ***************************************************************
    # Get a list of ALL the sticks that are plugged in
    # ***************************************************************
    devices = mvnc.EnumerateDevices()
    if len(devices) == 0:
        print('No devices found')
        quit()

    # ***************************************************************
    # Pick the first stick to run the network
    # ***************************************************************
    device = mvnc.Device(devices[0])

    # ***************************************************************
    # Open the NCS
    # ***************************************************************
    device.OpenDevice()

    filefolder = os.path.dirname(os.path.realpath(__file__))
    network_blob = filefolder + '/graph'
    system('(cd ' + filefolder + ';test -f graph || make compile)')

    #Load blob
    with open(network_blob, mode='rb') as f:
        blob = f.read()

    graph = device.AllocateGraph(blob)

    # ***************************************************************
    # Load the image
    # ***************************************************************
    ilsvrc_mean = numpy.load(
        EXAMPLES_BASE_DIR + 'data/ilsvrc12/ilsvrc_2012_mean.npy').mean(1).mean(
            1)  #loading the mean file
    img = cv2.imread(imgname)
    img = cv2.resize(img, dim)
    img = img.astype(numpy.float32)
    img[:, :, 0] = (img[:, :, 0] - ilsvrc_mean[0])
    img[:, :, 1] = (img[:, :, 1] - ilsvrc_mean[1])
    img[:, :, 2] = (img[:, :, 2] - ilsvrc_mean[2])

    # ***************************************************************
    # Send the image to the NCS
    # ***************************************************************
    graph.LoadTensor(img.astype(numpy.float16), 'user object')

    # ***************************************************************
    # Get the result from the NCS
    # ***************************************************************
    output, userobj = graph.GetResult()

    # ***************************************************************
    # Print the results of the inference from the NCS
    # ***************************************************************
    order = output.argsort()[::-1][:6]

    print('\n------- predictions --------')
    result = ""
    for i in range(0, 5):
        #print ('prediction ' + str(i) + ' (probability ' + str(output[order[i]]*100) + '%) is ' + labels[order[i]] + '  label index is: ' + str(order[i]) )
        label = re.search(r"n[0-9]+\s([^,]+)", labels[order[i]]).groups(1)[0]
        result = result + "\n%20s %0.2f %%" % (label, output[order[i]] * 100)

    # ***************************************************************
    # Clean up the graph and the device
    # ***************************************************************
    graph.DeallocateGraph()
    device.CloseDevice()

    return result, imgname
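
A hypothetical caller for infer(); it assumes EXAMPLES_BASE_DIR and dim (the network input size) are defined at module level and that the image path exists:

if __name__ == '__main__':
    predictions, image_name = infer('images/cat.jpg')
    print(image_name + ':' + predictions)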
Example #7
def main():
    print("Loading system...")
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)

    # enumerate all devices
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print("No devices found")
        quit()

    # use the first device found
    device = mvnc.Device(devices[0])
    # open the device
    device.open()

    # load the model from the disk
    with open(GRAPH_PATH, mode="rb") as f:
        graph_in_memory = f.read()

    graph = mvnc.Graph(GRAPH_PATH)

    # create the input and output fifos
    fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_in_memory)

    cap = cv2.VideoCapture(0)

    print("Starting capture...")
    try:
        while True:
            global start_time
            start_time = time.time()

            # read an image in bgr format
            ret, img = cap.read()
            original_img = img

            # bgr input scaling
            img = np.divide(img, 255.0)
            resized_img = cv2.resize(img, (416, 416), cv2.INTER_LINEAR)

            # reverse the channel order from BGR to RGB
            resized_img = resized_img[:, :, ::-1]
            resized_img = resized_img.astype(np.float32)

            # make an inference
            graph.queue_inference_with_fifo_elem(fifo_in, fifo_out,
                                                 resized_img, "user object")
            # get the result
            output, userobj = fifo_out.read_elem()

            # Tiny Yolo V2 requires post processing to filter out duplicate objects and low score objects
            # After post processing, the app will display the image and any detected objects
            post_processing(output, original_img)
    except KeyboardInterrupt:
        print("Closing, please wait...")
        pass

    # clean up
    cv2.destroyAllWindows()
    cap.release()
    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    device.close()
    device.destroy()
    print("All done!")
Example #8

def main():
    while True:
        vs = VideoStream(usePiCamera=True).start()
        time.sleep(1)
        print('Running NCS Caffe TinyYolo example')

        # Set logging level to only log errors
        mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
###########################################################################
        devices = mvnc.enumerate_devices()
        if len(devices) == 0:
            print('No devices found')
            return 1
###########################################################################
        device = mvnc.Device(devices[0]) 
        device.open()

        # Load graph from disk and allocate graph via API
        with open(tiny_yolo_graph_file, mode='rb') as f:
            graph_from_disk = f.read()
        graph = mvnc.Graph("Tiny Yolo Graph")
############################################################################
        fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_from_disk)

        # may need another line here from cheat sheet
        # fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_from_disk,
        #     fifo_in_type=mvnc.FifoType.HOST_WO, fifo_in_data_type=mvnc.FifoDataType.FP32,
        #     fifo_in_num_elem=2,
        #     fifo_out_type=mvnc.FifoType.HOST_RO, fifo_out_data_type=mvnc.FifoDataType.FP32,
        #     fifo_out_num_elem=2)

        # Read a frame from the video stream, resize it to network width and height,
        # save a copy in display_image for display, then convert to float32, normalize (divide by 255),
        # and convert BGR to RGB before queueing it as input for an inference

        while True:
        
            frame = vs.read()  # get a frame from the video stream
            input_image = frame.copy()  # copy the frame to an input image
            display_image = cv2.resize(input_image, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT), cv2.INTER_LINEAR)
            input_image = cv2.resize(input_image, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT), cv2.INTER_LINEAR)
            input_image = input_image.astype(np.float32)
            # normalize to the 0..1 range (the original assigned the scalar
            # 255.0 / input_image.max() here, which discards the image array)
            input_image = input_image / 255.0
            input_image = input_image[:, :, ::-1]  # convert to RGB

            # Use queue_inference_with_fifo_elem to load the image and get the result from the NCS
            graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, input_image, 'userobj')

            output, userobj = fifo_out.read_elem()

        # filter out all the objects/boxes that don't meet thresholds
            filtered_objs = filter_objects(output.astype(np.float32), input_image.shape[1], input_image.shape[0])

##############################################################
            save_box(filtered_objs, display_image)

            print('Displaying image with objects detected in GUI')
            print('Click in the GUI window and hit any key to exit')
        #display the filtered objects/boxes in a GUI window
            display_objects_in_gui(display_image, filtered_objs)

        fifo_in.destroy()
        fifo_out.destroy()
        graph.destroy()
        device.close()
        device.destroy()
        time.sleep(.5)
    print('Finished')
Example #9
def main():
    global gn_mean, gn_labels, actual_frame_height, actual_frame_width, TY_BOX_PROBABILITY_THRESHOLD, TY_MAX_IOU

    print_info()

    # Set logging level and initialize/open the first NCS we find
    mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 0)
    devices = mvnc.EnumerateDevices()
    if len(devices) < 2:
        print('This application requires two NCS devices.')
        print('Insert two devices and try again!')
        return 1
    ty_device = mvnc.Device(devices[0])
    ty_device.OpenDevice()

    gn_device = mvnc.Device(devices[1])
    gn_device.OpenDevice()

    #Load tiny yolo graph from disk and allocate graph via API
    try:
        with open(tiny_yolo_graph_file, mode='rb') as ty_file:
            ty_graph_from_disk = ty_file.read()
        ty_graph = ty_device.AllocateGraph(ty_graph_from_disk)
    except:
        print('Error - could not load tiny yolo graph file')
        ty_device.CloseDevice()
        gn_device.CloseDevice()
        return 1

    #Load googlenet graph from disk and allocate graph via API
    try:
        with open(googlenet_graph_file, mode='rb') as gn_file:
            gn_graph_from_disk = gn_file.read()
        gn_graph = gn_device.AllocateGraph(gn_graph_from_disk)
    except:
        print('Error - could not load googlenet graph file')
        ty_device.CloseDevice()
        gn_device.CloseDevice()
        return 1

    # GoogLeNet initialization
    EXAMPLES_BASE_DIR = '../../'
    gn_mean = np.load(EXAMPLES_BASE_DIR +
                      'data/ilsvrc12/ilsvrc_2012_mean.npy').mean(1).mean(
                          1)  # loading the mean file

    gn_labels_file = EXAMPLES_BASE_DIR + 'data/ilsvrc12/synset_words.txt'
    gn_labels = np.loadtxt(gn_labels_file, str, delimiter='\t')
    for label_index in range(0, len(gn_labels)):
        temp = gn_labels[label_index].split(',')[0].split(' ', 1)[1]
        gn_labels[label_index] = temp

    # get list of all the .mp4 files in the video directory
    input_video_filename_list = os.listdir(input_video_path)
    input_video_filename_list = [
        i for i in input_video_filename_list if i.endswith('.mp4')
    ]

    if (len(input_video_filename_list) < 1):
        # no images to show
        print('No .mp4 files found')
        return 1

    exit_app = False

    print('Starting GUI, press Q to quit')

    cv2.namedWindow(cv_window_name)
    cv2.waitKey(1)

    TY_MAX_IOU = 0.15
    TY_BOX_PROBABILITY_THRESHOLD = 0.13

    while (True):
        for input_video_file in input_video_filename_list:
            video_device = cv2.VideoCapture("./" + input_video_file)

            actual_frame_width = video_device.get(cv2.CAP_PROP_FRAME_WIDTH)
            actual_frame_height = video_device.get(cv2.CAP_PROP_FRAME_HEIGHT)
            print('actual video resolution: ' + str(actual_frame_width) +
                  ' x ' + str(actual_frame_height))

            if ((video_device == None) or (not video_device.isOpened())):
                print('Could not open video device.  Make sure file exists:')
                print('file name:' + input_video_file)
                print(
                    'Also, if you installed python opencv via pip or pip3 you')
                print(
                    'need to uninstall it and install from source with -D WITH_V4L=ON'
                )
                print('Use the provided script: install-opencv-from_source.sh')

            frame_count = 0
            start_time = time.time()

            while True:
                # Read image from video device,
                ret_val, input_image = video_device.read()
                if (not ret_val):
                    end_time = time.time()
                    print("No image from from video device, exiting")
                    break

                # resize image to network width and height
                # then convert to float32, normalize (divide by 255),
                # and finally convert to float16 to pass to LoadTensor as input
                # for an inference
                input_image = cv2.resize(
                    input_image,
                    (TY_NETWORK_IMAGE_WIDTH, TY_NETWORK_IMAGE_HEIGHT),
                    cv2.INTER_LINEAR)

                # save a display image as read from video device.
                display_image = input_image.copy()

                # modify input_image for TinyYolo input
                input_image = input_image[:, :, ::-1]  # convert to RGB
                input_image = input_image.astype(np.float32)
                input_image = np.divide(input_image, 255.0)

                # Load tensor and get result.  This executes the inference on the NCS
                ty_graph.LoadTensor(input_image.astype(np.float16),
                                    'user object')
                output, userobj = ty_graph.GetResult()

                # filter out all the objects/boxes that don't meet thresholds
                filtered_objs = filter_objects(output.astype(np.float32),
                                               input_image.shape[1],
                                               input_image.shape[0])

                get_googlenet_classifications(gn_graph, display_image,
                                              filtered_objs)

                # check if the window is visible, this means the user hasn't closed
                # the window via the X button
                prop_val = cv2.getWindowProperty(cv_window_name,
                                                 cv2.WND_PROP_ASPECT_RATIO)
                if (prop_val < 0.0):
                    end_time = time.time()
                    exit_app = True
                    break

                overlay_on_image(display_image, filtered_objs)

                # resize back to original size so image doesn't look squashed
                # It might be better to resize the boxes to match video dimensions
                # and overlay them directly on the video image returned from video device.
                display_image = cv2.resize(
                    display_image,
                    (int(actual_frame_width), int(actual_frame_height)),
                    cv2.INTER_LINEAR)
                # update the GUI window with new image
                cv2.imshow(cv_window_name, display_image)

                raw_key = cv2.waitKey(1)
                if (raw_key != -1):
                    if (handle_keys(raw_key) == False):
                        end_time = time.time()
                        exit_app = True
                        break

                frame_count = frame_count + 1

            frames_per_second = frame_count / (end_time - start_time)
            print('File: ' + input_video_file)
            print('Frames per Second: ' + str(frames_per_second))

            # close video device
            video_device.release()

            if (exit_app):
                break
        if (exit_app):
            break

    # clean up tiny yolo
    ty_graph.DeallocateGraph()
    ty_device.CloseDevice()

    # Clean up googlenet
    gn_graph.DeallocateGraph()
    gn_device.CloseDevice()

    print('Finished')
Example #10

def main():
    cv_window_name = "SSD MobileNet - hit any key to exit"

    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print('No devices found')
        quit()

    device = mvnc.Device(devices[0])
    device.open()
    graph_file_name = 'test.graph'

    with open(graph_file_name, mode='rb') as f:
        graph_in_memory = f.read()

    graph = mvnc.Graph(graph_file_name)
    fifoIn, fifoOut = graph.allocate_with_fifos(device, graph_in_memory)

    # read the image to run an inference on from the disk
    while True:
        f_list = []
        for i, j, filename_list in os.walk(jpath):
            f_list = filename_list
        si1 = len(f_list)
        time.sleep(1.2)
        f9_list = []
        for i, j, filename99_list in os.walk(jpath):
            f9_list = filename99_list
        si2 = len(f9_list)
        if si1 == si2:
            for f in f_list:
                temjpg = jpath + f
                ncsjpg = tmpth + f
                if os.path.isfile(temjpg):
                    mymovefile(temjpg, ncsjpg)
                    infer_image = cv2.imread(ncsjpg)
                    nn_list = f.split('.')
                    nf = nn_list[0]
                    try:
                        run_inference(infer_image, graph, fifoIn, fifoOut, nf)
                    except:
                        os.remove(ncsjpg)
                        continue
                    else:
                        f1_list = []
                        for e, r, filename2_list in os.walk(tmpth):
                            f1_list = filename2_list
                        if len(f1_list) > 1:
                            for ff in f1_list:
                                srcfile = tmpth + ff
                                dstfile = goal_path + ff
                                mymovefile(srcfile, dstfile)
                        else:
                            os.remove(ncsjpg)
                else:
                    continue
        else:
            continue

    # display the results and wait for user to hit a key
    #cv2.imshow(cv_window_name, infer_image)
    #cv2.imwrite("./0833/")
    #cv2.waitKey(0)

    # Clean up the graph and the device
    graph.destroy()
    fifoIn.destroy()
    fifoOut.destroy()
    device.close()
Example #11

def main():
    global resize_output, resize_output_width, resize_output_height

    if (not handle_args()):
        print_usage()
        return 1

    mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)

    devices = mvnc.EnumerateDevices()
    if len(devices) == 0:
        print('No devices found')
        quit()

    device = mvnc.Device(devices[0])
    device.OpenDevice()

    graph_filename = 'graph'
    with open(graph_filename, mode='rb') as f:
        graph_data = f.read()

    graphnet = device.AllocateGraph(graph_data)

    # template = cv2.imread('template.png',0)
    # template = cv2.resize(template, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT))
    # orb = cv2.ORB_create()
    # kp1, des1 = orb.detectAndCompute(template,None)
    cv2.namedWindow(cv_window_name)
    cv2.moveWindow(cv_window_name, 10, 10)

    exit_app = False

    cap = cv2.VideoCapture(0)

    actual_frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    actual_frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    print('actual video resolution: ' + str(actual_frame_width) + ' x ' +
          str(actual_frame_height))

    if ((cap == None) or (not cap.isOpened())):
        print('Could not open video device. ')
        exit_app = True

    frame_count = 0
    start_time = time.time()
    end_time = start_time

    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    ##  Calibration Matrix:
    ##[[ 516.14758188    0.          314.02546443]
    ## [   0.          515.76615942  250.15817809]
    ## [   0.            0.            1.        ]]
    ##Distortion:  [[  2.48041485e-01  -6.31759025e-01   4.36060601e-04  -1.48720850e-03
    ##    5.17810257e-01]]
    ##total error:  0.021112667972552
    mtx = numpy.matrix([[516.14758188, 0, 314.02546443],
                        [0, 515.76615942, 250.15817809], [0, 0, 1]])
    disto = numpy.matrix([[
        2.48041485e-01, -6.31759025e-01, 4.36060601e-04, -1.48720850e-03,
        5.17810257e-01
    ]])

    nrows = 7
    ncols = 7
    dimension = 9

    choice = input(
        'Enter Y to run camera calibration, press enter to continue:')
    if choice.upper() == 'Y':
        ret, mtx, disto, rvecs, tvecs = run_camera_calibration(
            cap, nrows, ncols, dimension)
        if not ret:
            print('failed to calibrate')
            exit_app = True

    print('mtx', mtx)
    print('disto', disto)
    ret, img = cap.read()
    h, w = img.shape[:-1]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, disto, (w, h), 1,
                                                      (w, h))

    #mapx, mapy = cv2.initUndistortRectifyMap(mtx, disto, None, newcameramtx, (w, h), 5)
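    # A sketch of the remap-based alternative hinted at above: compute the maps
    # once, then undistort each frame with cv2.remap, which is typically faster
    # than calling cv2.undistort per frame:
    #   mapx, mapy = cv2.initUndistortRectifyMap(mtx, disto, None, newcameramtx, (w, h), 5)
    #   frame_undistorted = cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR)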

    while (True):
        if (exit_app):
            break
        ret, image = cap.read()
        if (not ret):
            end_time = time.time()
            print("No image from from video device, exiting")
            break

        display_imagec = cv2.undistort(image, mtx, disto, None, newcameramtx)
        x, y, w, h = roi
        display_imagec = display_imagec[y:y + h, x:x + w]

        # check if user hasn't closed the window
        prop_val = cv2.getWindowProperty(cv_window_name,
                                         cv2.WND_PROP_ASPECT_RATIO)
        if (prop_val < 0.0):
            end_time = time.time()
            exit_app = True
            break


##################################################################################################
        display_image = cv2.cvtColor(display_imagec, cv2.COLOR_BGR2GRAY)
        #corners = cv2.goodFeaturesToTrack(display_image,81, 0.1, 10)

        ret, corners = cv2.findChessboardCorners(display_image, (ncols, nrows),
                                                 None)

        if ret != True:
            print('No match found')
            continue

        try:
            corners = sorted(numpy.int0(corners).tolist(), key=lambda x: x)
        except:
            continue
        xp = int(actual_frame_width / 2)
        yp = int(actual_frame_height / 2)
        cv2.circle(display_image, (xp, yp), 10, [150, 0, 0], -1)
        ii = 0
        print('######################################')

        sstart = True
        udx = 0
        for corner in corners:
            [x, y] = corner[0]
            if sstart:
                corner[0].append(0)
                xp = x
                yp = y
                sstart = False
                continue
            udx += (x - xp)
            corner[0].append(x - xp)
            xp = x
            yp = y
        print('###################################### udx : ', udx,
              len(corners))
        udx /= len(corners)

        print('###################################### udx : ', udx)

        ii = 0
        jj = 0
        sstart = True

        cpx = [[[]]]
        for corner in corners:
            if sstart:
                sstart = False
                continue
            if corner[0][2] < udx:
                cpx[ii].append(corner[0])
                [x, y, dx] = corner[0]
                print(ii, ',', x, ',', y, ',', dx)
            else:
                ii += 1
                cpx.append([corner[0]])
                [x, y, dx] = corner[0]
                print(ii, ',', x, ',', y, ',', dx)

            cv2.circle(display_image, (x, y), 3, 2000, -1)
        print('######################################')

        ###################################################################################################
        # kp2, des2 = orb.detectAndCompute(display_imagec,None)
        # matches = bf.match(des1,des2)
        # matches = sorted(matches, key = lambda x:x.distance)
        # display_image = cv2.drawMatches(template,kp1,display_imagec,kp2,matches[:36],None, flags=4)
        ####################################################################################################
        #run_inference(display_image, graphnet)
        #if (resize_output):
        #display_image = cv2.resize(display_image,(resize_output_width, resize_output_height), cv2.INTER_LINEAR)
        ####################################################################################################
        cv2.imshow(cv_window_name, display_image)

        raw_key = cv2.waitKey(1)
        if (raw_key != -1):
            if (handle_keys(raw_key) == False):
                end_time = time.time()
                exit_app = True
                break
        frame_count += 1

    frames_per_second = frame_count / (end_time - start_time)
    print('Frames per Second: ' + str(frames_per_second))

    cap.release()

    # Clean up the graph and the device
    graphnet.DeallocateGraph()
    device.CloseDevice()

    cv2.destroyAllWindows()
Example #12
def inferencer(results, frameBuffer, ssd_detection_mode, face_detection_mode, devnum, mp_active_stick_number, mp_stick_temperature):

    graphs = []
    graph_buffers = []
    graphHandles = []
    graphHandle0 = None
    graphHandle1 = None

    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 4)
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print("No NCS devices found")
        sys.exit(1)
    print(len(devices))

    # 1:= Enabled MobileNet-SSD Model
    if ssd_detection_mode == 1:
        with open(join(graph_folder, "graph"), mode="rb") as f:
            graph_buffers.append(f.read())
        graphs.append(mvnc.Graph('MobileNet-SSD'))

    # 1:= Enabled Fullweight FaceDetection Model
    if face_detection_mode == 1:
        with open(join(graph_folder, "graph.fullfacedetection"), mode="rb") as f:
            graph_buffers.append(f.read())
        graphs.append(mvnc.Graph('FullFaceDetection'))

    # 2:= Enabled Lightweight FaceDetection Model
    if face_detection_mode == 2:
        with open(join(graph_folder, "graph.shortfacedetection"), mode="rb") as f:
            graph_buffers.append(f.read())
        graphs.append(mvnc.Graph('ShortFaceDetection'))

    devopen = False
    for device in devices:
        try:
            device = mvnc.Device(device)
            device.open()
            for (graph, graph_buffer) in zip(graphs, graph_buffers):
                graphHandles.append(graph.allocate_with_fifos(device, graph_buffer))
            devopen = True
            break
        except:
            continue

    if devopen == False:
        print("NCS Devices open Error!!!")
        sys.exit(1)

    print("Loaded Graphs!!! ")

    THERMAL_STATS = mvnc.DeviceOption.RO_THERMAL_STATS
    temperature = device.get_option
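    # 'temperature' is bound to device.get_option so the loop below can poll the
    # stick's thermal statistics with temperature(THERMAL_STATS)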

    while True:
        # 0:= Inactive stick, 1:= Active stick
        if mp_active_stick_number[devnum] == 0:
            continue

        # Measure the temperature inside the stick
        mp_stick_temperature[devnum] = temperature(THERMAL_STATS)[0]

        try:
            if frameBuffer.empty():
                continue

            color_image = frameBuffer.get()
            prepimg = preprocess_image(color_image)
            res = None
            for (graph, graphHandle) in zip(graphs, graphHandles):
                graphHandle0 = graphHandle[0]
                graphHandle1 = graphHandle[1]
                graph.queue_inference_with_fifo_elem(graphHandle0, graphHandle1, prepimg.astype(np.float32), None)
                out, _ = graphHandle1.read_elem()
                num_valid_boxes = int(out[0])
                if num_valid_boxes > 0:
                if res is None:
                        res = [out]
                    else:
                        res = np.append(res, [out], axis=0)
            results.put(res)
        except:
            import traceback
            traceback.print_exc()
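
inferencer() is written to run as a worker process fed by multiprocessing queues. A minimal, hypothetical launcher (the queue and array names are assumptions, not part of the original):

import multiprocessing as mp

if __name__ == '__main__':
    results = mp.Queue()
    frame_buffer = mp.Queue(10)
    active_sticks = mp.Array('b', [1])   # 1 := active stick
    stick_temps = mp.Array('d', [0.0])
    worker = mp.Process(target=inferencer,
                        args=(results, frame_buffer, 1, 0, 0,
                              active_sticks, stick_temps),
                        daemon=True)
    worker.start()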
Example #13

def main():
    # Set up command-line arguments
    arg_parser = argparse.ArgumentParser(description='Run inference on a Movidius device')
    arg_parser.add_argument(
        '--graph-file',
        required=True,
        help='Movidius graph file',
    )
    arg_parser.add_argument(
        '--video-type',
        choices=['file', 'camera'],
        default='camera',
        help='video source type',
    )
    arg_parser.add_argument(
        '--source',
        default='/dev/video0',
        help='video source file',
    )
    arg_parser.add_argument(
        '--input-width',
        type=int,
        default=48,
        help='model input image width',
    )
    arg_parser.add_argument(
        '--input-height',
        type=int,
        default=48,
        help='model input image height',
    )
    arg_parser.add_argument(
        '--gui',
        action='store_true',
        help='enable the GUI',
    )

    # Parse the arguments
    args = arg_parser.parse_args()
    assert args.input_width > 0 and args.input_height > 0

    # Set up the Movidius device
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)
    mvnc_devices = mvnc.enumerate_devices()

    if not mvnc_devices:
        print('No Movidius device found')
        exit(1)

    mvnc_dev = mvnc.Device(mvnc_devices[0])
    mvnc_dev.open()

    # Load the graph file
    try:
        with open(args.graph_file, mode='rb') as file_graph:
            graph_buffer = file_graph.read()
    except (FileNotFoundError, IOError):
        print('Failed to load the graph file')
        exit(1)

    graph = mvnc.Graph('graph')
    fifo_in, fifo_out = graph.allocate_with_fifos(mvnc_dev, graph_buffer)

    # Open the video source
    if args.video_type == 'file':  # file
        video_dev = cv2.VideoCapture(args.source)
        video_width = video_dev.get(cv2.CAP_PROP_FRAME_WIDTH)
        video_height = video_dev.get(cv2.CAP_PROP_FRAME_HEIGHT)

    elif args.video_type == 'camera':  # camera
        video_dev = cv2.VideoCapture(0)

    # Main loop
    try:
        prev_timestamp = time.time()

        while True:
            ret, orig_image = video_dev.read()
            curr_time = time.localtime()

            # Check whether the stream has ended
            if not ret or orig_image is None:
                break

            # Resize to the model input dimensions and scale values to the 0-1 range
            resized_image = cv2.resize(
                orig_image,
                (args.input_width, args.input_height),
            ).astype(np.float32)
            normalized_image = resized_image / 255.0

            # Run the inference
            graph.queue_inference_with_fifo_elem(
                fifo_in,
                fifo_out,
                normalized_image,
                None,
            )
            result_onehot, _ = fifo_out.read_elem()

            left_score, right_score, stop_score, other_score = result_onehot
            class_id = np.argmax(result_onehot)

            if class_id == 0:
                class_str = 'left'
            elif class_id == 1:
                class_str = 'right'
            elif class_id == 2:
                class_str = 'stop'
            elif class_id == 3:
                class_str = 'other'

            # Compute the elapsed time
            recent_timestamp = time.time()
            period = recent_timestamp - prev_timestamp
            prev_timestamp = recent_timestamp

            print('time: %02d:%02d:%02d' %
                  (curr_time.tm_hour, curr_time.tm_min, curr_time.tm_sec))
            print('scores: %.2f %.2f %.2f %.2f' %
                  (left_score, right_score, stop_score, other_score))
            print('class: %s' % class_str)
            print('elapsed: %f' % period)
            print()

            # Show the image
            if args.gui:
                cv2.imshow('', orig_image)
                cv2.waitKey(1)

    except KeyboardInterrupt:
        print('Interrupted by user')

    # Release the video device
    video_dev.release()

    # Shut down the Movidius device
    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    mvnc_dev.close()
    mvnc_dev.destroy()
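
Hypothetical invocations of the script above (the file name is an assumption):

#   python3 predict.py --graph-file model.graph --video-type camera --gui
#   python3 predict.py --graph-file model.graph --video-type file --source clip.mp4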
Example #14
def main():
    print('test start')
    func()
    return
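    # NOTE: everything below this early return is unreachable; func() above is
    # the author's test entry point and the original TinyYolo flow is kept as-is.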
    print('Running NCS Caffe TinyYolo example')

    # Set logging level to only log errors
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print('No devices found')
        return 1
    device = mvnc.Device(devices[0])
    device.open()

    #Load graph from disk and allocate graph via API
    with open(tiny_yolo_graph_file, mode='rb') as f:
        graph_from_disk = f.read()
    graph = mvnc.Graph("Tiny Yolo Graph")
    fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_from_disk)

    # Read image from file, resize it to network width and height,
    # save a copy in display_image for display, then convert to float32, normalize (divide by 255),
    # and convert BGR to RGB before queueing it as input for an inference
    cv_window_name = '123'

    while (True):
        video_device = cv2.VideoCapture("./" + input_image_file)
        actual_frame_width = video_device.get(cv2.CAP_PROP_FRAME_WIDTH)
        actual_frame_height = video_device.get(cv2.CAP_PROP_FRAME_HEIGHT)
        print ('actual video resolution: ' + str(actual_frame_width) + ' x ' + str(actual_frame_height))
#        if ((video_device == None) or (not video_device.isOpened())):
#            print ('Could not open video device.  Make sure file exists:')
#            print ('file name:' + input_video_file)
#            print ('Also, if you installed python opencv via pip or pip3 you')
#            print ('need to uninstall it and install from source with -D WITH_V4L=ON')
#            print ('Use the provided script: install-opencv-from_source.sh')

        # frame_count = 0
        # start_time = time.time()
        # frame_count = frame_count + 1
        # frames_per_second = frame_count / (end_time - start_time)

        while True:
            print('Use the provided script: install-opencv-from_source.sh')
            time1 = time.time()
            ret_val, input_image = video_device.read()

        time2 = time.time()

        start_time = time.time()


        display_image = input_image

        input_image = cv2.resize(input_image, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT), cv2.INTER_LINEAR)
        input_image = input_image.astype(np.float32)
        input_image = np.divide(input_image, 255.0)
        input_image = input_image[:, :, ::-1]  # convert to RGB

        time3 = time.time()
        # Load tensor and get result.  This executes the inference on the NCS
        graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, input_image.astype(np.float32), None)
        output, userobj = fifo_out.read_elem()

        # filter out all the objects/boxes that don't meet thresholds
        filtered_objs = filter_objects(output.astype(np.float32), input_image.shape[1], input_image.shape[0])

        print('Displaying image with objects detected in GUI')
        print('Click in the GUI window and hit any key to exit')
        #display the filtered objects/boxes in a GUI window
        display_objects_in_gui(display_image, filtered_objs)



        time4 = time.time()

        end_time = time.time()
        time_used=end_time-start_time
        print("time used : "+ str(time_used)+"time point:"+str(time1)+" "+str(time2)+" "+str(time3)+" "+str(time4))
        cv2.imshow(cv_window_name, input_image)
        #raw_key = cv2.waitKey(1)

        if (not ret_val):
            # end_time = time.time()
            print("No image from video device, exiting")
            break

        # resize image to network width and height
        # then convert to float32, normalize (divide by 255),
        # and finally convert to float16 to pass to LoadTensor as input
        # for an inference
        # input_image = cv2.resize(input_image, (TY_NETWORK_IMAGE_WIDTH, TY_NETWORK_IMAGE_HEIGHT), cv2.INTER_LINEAR)

        # save a display image as read from video device.
        # close video device
        video_device.release()

    input_image = cv2.imread(input_image_file)
    display_image = input_image

    input_image = cv2.resize(input_image, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT), cv2.INTER_LINEAR)
    input_image = input_image.astype(np.float32)
    input_image = np.divide(input_image, 255.0)
    input_image = input_image[:, :, ::-1]  # convert to RGB

    # Load tensor and get result.  This executes the inference on the NCS
    graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, input_image.astype(np.float32), None)
    output, userobj = fifo_out.read_elem()

    # filter out all the objects/boxes that don't meet thresholds
    filtered_objs = filter_objects(output.astype(np.float32), input_image.shape[1], input_image.shape[0])

    print('Displaying image with objects detected in GUI')
    print('Click in the GUI window and hit any key to exit')
    #display the filtered objects/boxes in a GUI window
    display_objects_in_gui(display_image, filtered_objs)

    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    device.close()
    device.destroy()
    print('Finished')
Example #15

def main():
    """Main function for the program.  Everything starts here.

    :return: None
    """
    global resize_output, resize_output_width, resize_output_height, \
           device_count


    if (not handle_args()):
        print_usage()
        return 1

    # get list of all the .mp4 files in the image directory
    input_video_filename_list = os.listdir(input_video_path)
    input_video_filename_list = [i for i in input_video_filename_list if i.endswith('.mp4')]
    if (len(input_video_filename_list) < 1):
        # no images to show
        print('No video (.mp4) files found')
        return 1

    resting_image = cv2.imread("resting_image.png")
    if (resting_image is None):
        resting_image = numpy.zeros((800, 600, 3), numpy.uint8)

    if (resize_output):
        resting_image = cv2.resize(resting_image,
                                   (resize_output_width, resize_output_height),
                                   cv2.INTER_LINEAR)

    # Set logging level to only log errors
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)

    devices = mvnc.enumerate_devices()
    if len(devices) < 1:
        print('No NCS device detected.')
        print('Insert device and try again!')
        return 1

    if (device_count < 1) or (device_count > len(devices)):
        device_count = len(devices)


    # Create an object detector processor for each device that opens
    # and store it in our list of processors
    obj_detect_list = list()
    idle_obj_detect_list = list()

    device_number = 0

    for one_device in devices:
        try:
            obj_detect_dev = mvnc.Device(one_device)
            obj_detect_dev.open()
            print("opened device " + str(device_number))
            obj_detector_proc = Yolov2_tiny_Processor(NETWORK_GRAPH_FILENAME, obj_detect_dev,
                                                      inital_box_prob_thresh=min_score_percent / 100.0,
                                                      classification_mask=object_classifications_mask,
                                                      name="object detector " + str(device_number))
            if (device_number < device_count):
                obj_detect_list.append(obj_detector_proc)
            else:
                idle_obj_detect_list.append(obj_detector_proc)

            device_number += 1

        except:
            print("Could not open device " + str(device_number) + ", trying next device")
            pass


    if len(obj_detect_list) < 1:
        print('Could not open any NCS devices.')
        print('Reinsert devices and try again!')
        return 1

    print("Using " + str(len(obj_detect_list)) + " devices for object detection")
    print_hot_keys()

    cv2.namedWindow(cv_window_name)
    cv2.moveWindow(cv_window_name, 10,  10)
    cv2.waitKey(1)

    exit_app = False
    while (True):
        for input_video_file in input_video_filename_list :

            for one_obj_detect_proc in obj_detect_list:
                print("using object detector: " + one_obj_detect_proc.get_name())
                one_obj_detect_proc.drain_queues()

            # video processor that will put video frames images on the object detector's input FIFO queue
            video_proc = VideoProcessor(input_video_path + '/' + input_video_file,
                                        network_processor_list = obj_detect_list)
            video_proc.start_processing()

            frame_count = 0
            start_time = time.time()
            last_throttle_time = start_time
            end_time = start_time
            uptime = time.time()
            while(True):
                done = False
                for one_obj_detect_proc in obj_detect_list:
                    try:
                        (filtered_objs, display_image) = one_obj_detect_proc.get_async_inference_result()
                        print("resive result:",time.time()-uptime)
                        uptime=time.time()
                    except :
                        print("exception caught in main")
                        raise


                    # check if the window is visible, this means the user hasn't closed
                    # the window via the X button
                    prop_val = cv2.getWindowProperty(cv_window_name, cv2.WND_PROP_ASPECT_RATIO)
                    if (prop_val < 0.0):
                        end_time = time.time()
                        video_proc.stop_processing()
                        video_proc.cleanup()
                        exit_app = True
                        break

                    running_fps = frame_count / (time.time() - start_time)
                    overlay_on_image(display_image, filtered_objs, running_fps)
                    print("show time:",time.time()-uptime)

                    if (resize_output):
                        display_image = cv2.resize(display_image,
                                                   (resize_output_width, resize_output_height),
                                                   cv2.INTER_LINEAR)
                    cv2.imshow(cv_window_name, display_image)

                    raw_key = cv2.waitKey(1)
                    if (raw_key != -1):
                        if (handle_keys(raw_key, obj_detect_list) == False):
                            end_time = time.time()
                            exit_app = True
                            done = True
                            break

                    frame_count += 1

                    #if (one_obj_detect_proc.is_input_queue_empty()):
                    if (not video_proc.is_processing()):
                        # assume the video is over.
                        end_time = time.time()
                        done = True
                        print('video processor not processing, assuming video is finished.')
                        break

                #if (frame_count % 100) == 0:
                if ((time.time() - last_throttle_time) > throttle_check_seconds):
                    #long movie, check for throttling devices
                    # throttling = one_obj_detect_proc.get_device().get_option(mvnc.DeviceOption.RO_THERMAL_THROTTLING_LEVEL)
                    last_throttle_time = time.time()
                    print("movie not done, but going a long time so adjust for throttling")
                    video_proc.pause()
                    do_throttle_adjustment(obj_detect_list, idle_obj_detect_list)
                    video_proc.unpause()

                if (done) : break

            frames_per_second = frame_count / (end_time - start_time)
            print('Frames per Second: ' + str(frames_per_second))

            # check for throttling devices and save in throttling list
            throttling_list = list()
            for one_obj_detect_proc in obj_detect_list:
                throttling = one_obj_detect_proc.get_device().get_option(mvnc.DeviceOption.RO_THERMAL_THROTTLING_LEVEL)
                if (throttling > 0):
                    print("\nDevice " + one_obj_detect_proc.get_name() + " is throttling, level is: " + str(throttling))
                    throttling_list.append(one_obj_detect_proc)


            if (not exit_app):
                # rest between movies, display an image while resting
                resting_display_image = cv2.resize(resting_image,
                                                   (display_image.shape[1], display_image.shape[0]),
                                                   cv2.INTER_LINEAR)
                cv2.imshow(cv_window_name, resting_display_image)

                if ((len(throttling_list) > len(idle_obj_detect_list))):
                    # more devices throttling than we have in the idle list
                    # so do extra rest by applying a multiplier to the rest time
                    print("throttling devices... resting")
                    cv2.waitKey(rest_seconds * 1000 * rest_throttling_multiplier)
                else:
                    cv2.waitKey(rest_seconds * 1000)

            # remove the throttling devices from the main list and put them at the end so they will
            # be moved to the idle list with priority
            for one_throttling in throttling_list:
                obj_detect_list.remove(one_throttling)
                obj_detect_list.append(one_throttling)


            num_idle = len(idle_obj_detect_list)
            if (num_idle > len(obj_detect_list)):
                num_idle = len(obj_detect_list)
            if (num_idle > 0):
                # replace one of the devices with an idle device
                for idle_index in range(0, num_idle):
                    #for one_idle_proc in idle_obj_detect_list:
                    obj_detect_list.insert(0, idle_obj_detect_list.pop(0))

                for idle_count in range(0, num_idle):
                    idle_obj_detect_list.append(obj_detect_list.pop())

            video_proc.stop_processing()
            video_proc.cleanup()

            if (exit_app):
                break

        if (exit_app):
            break


    # Clean up the graph and the device
    for one_obj_detect_proc in obj_detect_list:
        cv2.waitKey(1)
        one_obj_detect_proc.cleanup(True)

    cv2.destroyAllWindows()
Example #16
def main():

    global resize_output, resize_output_width, resize_output_height

    if (not handle_args()):
        print_usage()
        return 1

    mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)
    devices = mvnc.EnumerateDevices()
    if len(devices) == 0:
        print('No devices found')
        quit()
    device = mvnc.Device(devices[0])
    device.OpenDevice()
    graph_filename = 'caffe_ssd_mobilenet_graph'
    with open(os.path.join(Config.model_dir, graph_filename), mode='rb') as f:
        graph_data = f.read()
    ssd_mobilenet_graph = device.AllocateGraph(graph_data)

    cv2.namedWindow(cv_window_name)
    cv2.moveWindow(cv_window_name, 10, 10)

    exit_app = False
    list_cam = Config.get_usb_cam()

    while (True):

        if list_cam is not None and list_cam[0] != "":

            cam_index = int(list_cam[0][-1])
            cap = cv2.VideoCapture(cam_index)

            if ((cap is None) or (not cap.isOpened())):
                print('Could not open usb cam')
                exit_app = True
                break

            actual_frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
            actual_frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

            while (True):

                ret, display_image = cap.read()

                if (not ret):
                    print("No image from from video device, exiting")
                    break

                # check if the window is visible, this means the user hasn't closed
                # the window via the X button
                prop_val = cv2.getWindowProperty(cv_window_name,
                                                 cv2.WND_PROP_ASPECT_RATIO)
                if (prop_val < 0.0):
                    exit_app = True
                    break

                run_inference(display_image, ssd_mobilenet_graph)

                if (resize_output):
                    display_image = cv2.resize(
                        display_image,
                        (resize_output_width, resize_output_height),
                        cv2.INTER_LINEAR)
                cv2.imshow(cv_window_name, display_image)

                raw_key = cv2.waitKey(1)
                if (raw_key != -1):
                    if (not handle_keys(raw_key)):
                        exit_app = True
                        break

            cap.release()

            if (exit_app):
                break

        if (exit_app):
            break

    ssd_mobilenet_graph.DeallocateGraph()
    device.CloseDevice()
    cv2.destroyAllWindows()
Exemple #17
def main():
    vs = VideoStream(usePiCamera=True).start()
    time.sleep(1)
    print('Running NCS Caffe TinyYolo example')

    # Set logging level to only log errors
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)

    # query the attached devices and use the first one found
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print('No devices found')
        return 1
    device = mvnc.Device(devices[0])
    device.open()

    # Load graph from disk, allocate it on the device, and create the
    # input/output FIFO queues used to feed tensors and read results
    with open(tiny_yolo_graph_file, mode='rb') as f:
        graph_from_disk = f.read()
    graph = mvnc.Graph("Tiny Yolo Graph")
    fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_from_disk)

    # In the loop below: read a frame from the video stream, resize it to the
    # network width and height, keep a copy in display_image for display, then
    # convert to float32, normalize (divide by 255), and queue it on the
    # input FIFO for an inference

    while True:

        frame = vs.read()  # Get a frame from a video stream.
        input_image = frame.copy()  # copy frame to an input image.
        display_image = cv2.resize(input_image,
                                   (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT),
                                   cv2.INTER_LINEAR)
        input_image = cv2.resize(input_image,
                                 (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT),
                                 cv2.INTER_LINEAR)
        input_image = input_image.astype(np.float32)
        input_image = input_image * (1.0 / 255.0)  # scale values to [0, 1]
        input_image = input_image[:, :, ::-1]  # convert to RGB

        # queue the image on the input FIFO to run an inference on the NCS,
        # then read the result from the output FIFO
        graph.queue_inference_with_fifo_elem(fifo_in, fifo_out,
                                             input_image.astype(np.float32),
                                             None)
        output, userobj = fifo_out.read_elem()

        # filter out all the objects/boxes that don't meet thresholds
        filtered_objs = filter_objects(output.astype(np.float32),
                                       input_image.shape[1],
                                       input_image.shape[0])

        print('Displaying image with objects detected in GUI')
        print('Click in the GUI window and hit any key to exit')
        # display the filtered objects/boxes in a GUI window; break out of
        # the loop when the user asks to exit so the cleanup below is reached
        if (not display_objects_in_gui(display_image, filtered_objs)):
            break

    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    device.close()
    device.destroy()
    print('Finished')
Exemple #18
    def __init__(self):
        super(MyMnistWindow, self).__init__()

        self.resize(284*2, 330*2)  # resize sets the width and height
        self.move(100, 100)    # move sets the window position on screen
        self.setWindowIcon(QIcon('./logo.ico'))
        self.setWindowTitle('NCS Test - Handwritten Digit Recognition')
        self.setWindowFlags(Qt.FramelessWindowHint)  # frameless window
        # keep setMouseTracking False, otherwise mouse-move events are
        # tracked even when no button is pressed
        self.setMouseTracking(False)

        self.pos_xy = []  # points the mouse has passed through while drawing

        # add the widgets
        self.label_draw = QLabel('', self)
        self.label_draw.setGeometry(2, 2, 550, 550)  # (x, y, width, height)
        # setStyleSheet replaces the whole stylesheet, so set the background
        # and the border in a single call
        self.label_draw.setStyleSheet(
            "QLabel{background:rgb(255,255,255);border:1px solid black;}")
        self.label_draw.setAlignment(Qt.AlignCenter)

        self.label_result_name = QLabel('Prediction:', self)
        self.label_result_name.setGeometry(2, 570, 61, 35)
        self.label_result_name.setAlignment(Qt.AlignCenter)

        self.label_result = QLabel(' ', self)
        self.label_result.setGeometry(64, 570, 35, 35)
        self.label_result.setFont(QFont("Roman times", 8, QFont.Bold))
        self.label_result.setStyleSheet("QLabel{border:1px solid black;}")
        self.label_result.setAlignment(Qt.AlignCenter)

        self.btn_recognize = QPushButton("Recognize", self)
        self.btn_recognize.setGeometry(110, 570, 50, 35)
        self.btn_recognize.clicked.connect(self.btn_recognize_on_clicked)

        self.btn_clear = QPushButton("Clear", self)
        self.btn_clear.setGeometry(170, 570, 50, 35)
        self.btn_clear.clicked.connect(self.btn_clear_on_clicked)

        self.btn_close = QPushButton("Close", self)
        self.btn_close.setGeometry(230, 570, 50, 35)
        self.btn_close.clicked.connect(self.btn_close_on_clicked)

        # inference time display
        self.label_time_name = QLabel('Inference time:', self)
        self.label_time_name.setGeometry(320, 570, 100, 35)
        self.label_time_name.setAlignment(Qt.AlignCenter)

        self.label_time = QLabel(' ', self)
        self.label_time.setGeometry(430, 570, 110, 35)
        self.label_time.setFont(QFont("Roman times", 8, QFont.Bold))
        self.label_time.setStyleSheet("QLabel{border:1px solid black;}")
        self.label_time.setAlignment(Qt.AlignCenter)

        # NCS status display
        self.label_ncs_name = QLabel('NCS status:', self)
        self.label_ncs_name.setGeometry(2, 610, 100, 35)
        self.label_ncs_name.setAlignment(Qt.AlignCenter)

        self.label_ncs = QLabel(' ', self)
        self.label_ncs.setGeometry(110, 610, 430, 35)
        self.label_ncs.setFont(QFont("Roman times", 8, QFont.Bold))
        self.label_ncs.setStyleSheet("QLabel{border:1px solid black;}")
        self.label_ncs.setAlignment(Qt.AlignCenter)

        # open the NCS device
        mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)
        # get the list of neural compute devices attached to the host
        self.devices = mvnc.enumerate_devices()

        if len(self.devices) == 0:
            print("[INFO] No NCS devices found!")
            # raising a bare string is a TypeError; raise a real exception
            raise RuntimeError("[Error] No devices found!")

        # use the first NCS device
        self.device = mvnc.Device(self.devices[0])
        print("[INFO] Opened NCS device id: " + str(self.devices[0]))
        # open communication with the device
        self.device.open()
        self.ncs_info = "NCS opened successfully, device ID: " + str(self.devices[0])

        # load the graph file from disk
        with open(GRAPH_FILE, mode='rb') as f:
            self.graphFileBuff = f.read()

        # initialize a graph named "alexnet"
        self.graph = mvnc.Graph("alexnet")

        # create the input/output FIFO queues and load the graph onto the device
        self.input_fifo, self.output_fifo = self.graph.allocate_with_fifos(
            self.device, self.graphFileBuff)
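
        # Illustrative sketch, assuming numpy is imported as np and the
        # recognize handler builds a float32 array img with the shape the
        # graph expects; an inference through these FIFOs would look like:
        #
        #   self.graph.queue_inference_with_fifo_elem(
        #       self.input_fifo, self.output_fifo, img, None)
        #   output, user_obj = self.output_fifo.read_elem()
        #   digit = int(np.argmax(output))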
Exemple #19
def main():
    global gn_mean, gn_labels, input_image_filename_list

    print('Running NCS birds example')

    # get list of all the .jpg files in the image directory
    input_image_filename_list = os.listdir(input_image_path)
    input_image_filename_list = [
        input_image_path + '/' + i for i in input_image_filename_list
        if i.endswith('.jpg')
    ]

    if (len(input_image_filename_list) < 1):
        # no images to show
        print('No .jpg files found')
        return 1

    # Set logging level and initialize/open the first NCS we find
    mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 0)
    devices = mvnc.EnumerateDevices()
    if len(devices) < 2:
        print('This application requires two NCS devices.')
        print('Insert two devices and try again!')
        return 1
    ty_device = mvnc.Device(devices[0])
    ty_device.OpenDevice()

    gn_device = mvnc.Device(devices[1])
    gn_device.OpenDevice()

    #Load tiny yolo graph from disk and allocate graph via API
    with open(tiny_yolo_graph_file, mode='rb') as ty_file:
        ty_graph_from_disk = ty_file.read()
    ty_graph = ty_device.AllocateGraph(ty_graph_from_disk)

    #Load googlenet graph from disk and allocate graph via API
    with open(googlenet_graph_file, mode='rb') as gn_file:
        gn_graph_from_disk = gn_file.read()
    gn_graph = gn_device.AllocateGraph(gn_graph_from_disk)

    # GoogLeNet initialization
    EXAMPLES_BASE_DIR = '../../'
    gn_mean = np.load(EXAMPLES_BASE_DIR +
                      'data/ilsvrc12/ilsvrc_2012_mean.npy').mean(1).mean(
                          1)  # loading the mean file

    gn_labels_file = EXAMPLES_BASE_DIR + 'data/ilsvrc12/synset_words.txt'
    gn_labels = np.loadtxt(gn_labels_file, str, delimiter='\t')
    for label_index in range(0, len(gn_labels)):
        temp = gn_labels[label_index].split(',')[0].split(' ', 1)[1]
        gn_labels[label_index] = temp

    print('Q to quit, or any key to advance to next image')

    cv2.namedWindow(cv_window_name)

    for input_image_file in input_image_filename_list:
        # Read image from file, resize it to network width and height,
        # save a copy in display_image for display, then convert to float32,
        # normalize (divide by 255), and finally convert to float16 to pass
        # to LoadTensor as input for an inference
        input_image = cv2.imread(input_image_file)

        # resize the image to be a standard width for all images and maintain aspect ratio
        STANDARD_RESIZE_WIDTH = 800
        input_image_width = input_image.shape[1]
        input_image_height = input_image.shape[0]
        standard_scale = float(STANDARD_RESIZE_WIDTH) / input_image_width
        new_width = int(
            input_image_width *
            standard_scale)  # this should be == STANDARD_RESIZE_WIDTH
        new_height = int(input_image_height * standard_scale)
        input_image = cv2.resize(input_image, (new_width, new_height),
                                 cv2.INTER_LINEAR)

        display_image = input_image
        input_image = cv2.resize(
            input_image, (TY_NETWORK_IMAGE_WIDTH, TY_NETWORK_IMAGE_HEIGHT),
            cv2.INTER_LINEAR)
        input_image = input_image[:, :, ::-1]  # convert to RGB
        input_image = input_image.astype(np.float32)
        input_image = np.divide(input_image, 255.0)

        # Load tensor and get result.  This executes the inference on the NCS
        ty_graph.LoadTensor(input_image.astype(np.float16), 'user object')
        output, userobj = ty_graph.GetResult()

        # filter out all the objects/boxes that don't meet thresholds
        filtered_objs = filter_objects(
            output.astype(np.float32), input_image.shape[1],
            input_image.shape[0])  # fc27 instead of fc12 for yolo_small

        get_googlenet_classifications(gn_graph, display_image, filtered_objs)

        # check if the window has been closed.  all properties will return -1.0
        # for windows that are closed. If the user has closed the window via the
        # x on the title bar then we will break out of the loop.  we are
        # getting property aspect ratio but it could probably be any property
        try:
            prop_asp = cv2.getWindowProperty(cv_window_name,
                                             cv2.WND_PROP_ASPECT_RATIO)
        except:
            break
        if (prop_asp < 0.0):
            # the property returned was < 0 so assume window was closed by user
            break

        ret_val = display_objects_in_gui(display_image, filtered_objs)
        if (not ret_val):
            break

    # clean up tiny yolo
    ty_graph.DeallocateGraph()
    ty_device.CloseDevice()

    # Clean up googlenet
    gn_graph.DeallocateGraph()
    gn_device.CloseDevice()

    print('Finished')
    # Do mean subtraction and scale each channel
    for i in range(3):
        img[:, :, i] = (img[:, :, i] - mean) * std

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img.astype(numpy.float16)
    imgarr.append(img)

print("Processed ", len(imgarr), " images")

# *****************************************************************
# Open the device and load the graph into each of the devices
# *****************************************************************
for devnum in range(len(devices)):
    print("***********************************************")
    devHandle.append(mvnc.Device(devices[devnum]))
    devHandle[devnum].OpenDevice()

    opt = devHandle[devnum].GetDeviceOption(mvnc.DeviceOption.OPTIMISATIONLIST)
    print("Optimisations:")
    print(opt)

    graphHandle.append(devHandle[devnum].AllocateGraph(graph))
    graphHandle[devnum].SetGraphOption(mvnc.GraphOption.ITERATIONS, 1)
    iterations = graphHandle[devnum].GetGraphOption(
        mvnc.GraphOption.ITERATIONS)
    print("Iterations:", iterations)

print("***********************************************")
print("Loaded Graphs")
print("***********************************************\n\n\n")
def run_detection(camera_path, graph_filename, visualize, ros_enabled):

    global resize_output, resize_output_width, resize_output_height

    # configure the NCS
    mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)

    # Get a list of ALL the sticks that are plugged in
    devices = mvnc.EnumerateDevices()
    if len(devices) == 0:
        print('No devices found')
        quit()

    # Pick the first stick to run the network
    device = mvnc.Device(devices[0])

    # Open the NCS
    device.OpenDevice()

    # graph_filename = 'graph'

    # Load graph file to memory buffer
    with open(graph_filename, mode='rb') as f:
        graph_data = f.read()

    # allocate the Graph instance from NCAPI by passing the memory buffer
    ssd_mobilenet_graph = device.AllocateGraph(graph_data)

    pub = None
    sub = None

    if ros_enabled:
        from utils.ros_op import CameraSubscriber, DetectionPublisher
        pub = DetectionPublisher()
        sub = CameraSubscriber()

    cv2.namedWindow(cv_window_name)
    cv2.moveWindow(cv_window_name, 10, 10)

    exit_app = False
    while (True):

        if ros_enabled:
            cap = sub.get_frame()
        else:
            cap = WebcamVideoStream(src=camera_path).start()

        if ((cap is None) or (not cap.is_running())):
            print('Could not open camera device')
            print('device path:' + camera_path)
            exit_app = True
            break

        actual_frame_width = cap.stream.get(cv2.CAP_PROP_FRAME_WIDTH)
        actual_frame_height = cap.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)

        print('actual video resolution: ' + str(actual_frame_width) + ' x ' +
              str(actual_frame_height))

        frame_count = 0
        start_time = time.time()
        end_time = start_time

        while (cap.is_running()):
            ret, display_image = cap.read()

            if (not ret):
                end_time = time.time()
                print("No image from from video device, exiting")
                break

            # check if the window is visible, this means the user hasn't closed
            # the window via the X button
            prop_val = cv2.getWindowProperty(cv_window_name,
                                             cv2.WND_PROP_ASPECT_RATIO)
            if (prop_val < 0.0):
                end_time = time.time()
                exit_app = True
                break

            run_inference(display_image, ssd_mobilenet_graph, visualize, pub)

            if (resize_output):
                display_image = cv2.resize(
                    display_image, (resize_output_width, resize_output_height),
                    cv2.INTER_LINEAR)
            cv2.imshow(cv_window_name, display_image)

            raw_key = cv2.waitKey(1)
            if (raw_key != -1):
                if (not handle_keys(raw_key)):
                    end_time = time.time()
                    exit_app = True
                    break
            frame_count += 1

        frames_per_second = frame_count / (end_time - start_time)
        print('Frames per Second: ' + str(frames_per_second))

        cap.stop()

        if (exit_app):
            break

    # Clean up the graph and the device
    ssd_mobilenet_graph.DeallocateGraph()
    device.CloseDevice()

    cv2.destroyAllWindows()
Exemple #22
def main():
    # name of the opencv window
    # cv_window_name = "SSD MobileNet - hit any key to exit"

    # start the ROS publisher for the detection results

    rospy.init_node('SSD_node', anonymous=True)

    pub = rospy.Publisher('ssd_output', Float32MultiArray, queue_size=2)

    # Get a list of ALL the sticks that are plugged in
    # we need at least one
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print('No devices found')
        quit()

    # Pick the first stick to run the network
    device = mvnc.Device(devices[0])

    # Open the NCS
    device.open()

    graph = mvnc.Graph('graph1')

    # The graph file that was created with the ncsdk compiler
    graph_file_name = '/home/owl/src/ncs-ros/graph'

    # read in the graph file to memory buffer
    with open(graph_file_name, mode='rb') as f:
        graph_in_memory = f.read()

    # create the NCAPI graph instance from the memory buffer containing the graph file.
    inputfifo, outputfifo = graph.allocate_with_fifos(device, graph_in_memory)
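    # the input/output FIFOs are used to queue inference requests and read results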

    cam = cv2.VideoCapture(0)
    if not cam.isOpened():
        print('error: camera not opened')
    # the property below is not supported by this OpenCV implementation
    # cam.set(cv2.CAP_PROP_BUFFERSIZE , 1)

    # images for testing
    for filename in os.listdir('./images'):
        image = cv2.imread('images/' + filename)
        print(filename)
        # image = cv2.imread('images/img00000.jpg')
        output = run_inference(image, graph, inputfifo, outputfifo)
        #message will be in format described above
        msg = Float32MultiArray()
        msg.data = output
        print(output)
        pub.publish(msg)

        cv2.imshow('mobilenet', image)
        cv2.waitKey(0)

    # img_counter = 0
    # while not rospy.is_shutdown():
    #     ret, image = cam.read()
    #     print(img_counter)
    #     img_counter += 1
    #     #run network
    #     output = run_inference(image, graph, inputfifo, outputfifo)
    #     #message will be in format described above
    #     msg = Float32MultiArray()
    #     msg.data = output
    #     print(output)
    #     pub.publish(msg)

    #     cv2.imwrite('out_imgs/'+str(img_counter)+'.jpg', image)

    #     # display the results
    #     cv2.imshow('mobilenet', image)
    #     cv2.waitKey(1)

    # Clean up the graph and the device
    graph.destroy()
    inputfifo.destroy()
    outputfifo.destroy()
    device.close()
    device.destroy()
def main():
    """Main function for the program.  Everything starts here.

    :return: None
    """
    global resize_output, resize_output_width, resize_output_height, \
           obj_detector_proc, video_proc, last_num_persons, asyncImWriter

    if (not handle_args()):
        print_usage()
        return 1

    # Set logging level to only log errors
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)

    devices = mvnc.enumerate_devices()
    if len(devices) < 1:
        print('No NCS device detected.')
        print('Insert device and try again!')
        return 1

    # Pick the first stick to run the network
    # use the first NCS device that opens for the object detection.
    dev_count = 0
    for one_device in devices:
        try:
            obj_detect_dev = mvnc.Device(one_device)
            obj_detect_dev.open()
            print("opened device " + str(dev_count))
            break
        except:
            print("Could not open device " + str(dev_count) +
                  ", trying next device")
            pass
        dev_count += 1

    cv2.namedWindow(cv_window_name)
    cv2.moveWindow(cv_window_name, 10, 10)
    cv2.waitKey(1)

    obj_detector_proc = SsdMobileNetProcessor(
        NETWORK_GRAPH_FILENAME,
        obj_detect_dev,
        inital_box_prob_thresh=min_score_percent / 100.0,
        classification_mask=object_classifications_mask)

    exit_app = False
    while (True):
        # video processor that will put video frames images on the object detector's input FIFO queue
        video_proc = CameraProcessor(CAMERA_INDEX,
                                     1920,
                                     1080,
                                     network_processor=obj_detector_proc)
        video_proc.start_processing()

        frame_count = 0
        start_time = time.time()
        end_time = start_time

        while (True):
            try:
                (filtered_objs, display_image
                 ) = obj_detector_proc.get_async_inference_result()
            except:
                print("exception caught in main")
                raise

            # check if the window is visible, this means the user hasn't closed
            # the window via the X button
            prop_val = cv2.getWindowProperty(cv_window_name,
                                             cv2.WND_PROP_ASPECT_RATIO)
            if (prop_val < 0.0):
                end_time = time.time()
                video_proc.stop_processing()
                exit_app = True
                break

            agg_results = overlay_on_image(display_image, filtered_objs)
            num_persons = len(agg_results)

            if (show_output):
                if (resize_output):
                    display_image = cv2.resize(
                        display_image,
                        (resize_output_width, resize_output_height),
                        cv2.INTER_LINEAR)
                cv2.imshow(cv_window_name, display_image)

                raw_key = cv2.waitKey(1)
                if (raw_key != -1):
                    if (not handle_keys(raw_key, obj_detector_proc)):
                        end_time = time.time()
                        exit_app = True
                        video_proc.stop_processing()
                        break

            frame_count += 1

            # if (obj_detector_proc.is_input_queue_empty()):
            #     end_time = time.time()
            #     print('Neural Network Processor has nothing to process, assuming video is finished.')
            #     break

        frames_per_second = frame_count / (end_time - start_time)
        print('Frames per Second: ' + str(frames_per_second))

        throttling = obj_detect_dev.get_option(
            mvnc.DeviceOption.RO_THERMAL_THROTTLING_LEVEL)
        if (throttling > 0):
            print("\nDevice is throttling, level is: " + str(throttling))
            print("Sleeping for a few seconds....")
            cv2.waitKey(2000)

        #video_proc.stop_processing()
        cv2.waitKey(1)
        if (exit_app):
            video_proc.cleanup()
            break

    # Clean up the graph and the device
    obj_detector_proc.cleanup()
    obj_detect_dev.close()
    obj_detect_dev.destroy()

    cv2.destroyAllWindows()
Exemple #24
def main():
    url_cam = 'rr+tcp://192.168.43.141:2355?service=Webcam'
    url_drive = 'rr+tcp://192.168.43.141:2356?service=Drive'

    # Connect to camera and start streaming on global current_frame variable
    # p,cam = connect_camera(url_cam)
    cam = connect_camera2(url_cam)

    # Connect to motors to drive
    car = connect_drive(url_drive)

    ## Check Movidius Device
    devices = mvnc.EnumerateDevices()
    if len(devices) == 0:
        print('No devices found')
        quit()

    device = mvnc.Device(devices[0])
    device.OpenDevice()

    # Load Deep Neural Network graph
    with open('./graph', mode='rb') as f:
        graphfile = f.read()
    # Send graph to the device
    graph = device.AllocateGraph(graphfile)

    # Image parameters
    image_size = np.array([120, 160])
    top_cutoff = 40

    is_view = False
    if is_view:
        cv2.namedWindow("Image")  # Debug

    try:
        prev_time = time.time()
        iteration_times = []
        predict_times = []
        prep_times = []

        while True:
            # Just loop, grabbing a fresh frame each iteration.
            # This is not ideal but good enough for demonstration.
            current_frame = WebcamImageToMat(cam.CaptureFrame())
            if current_frame is not None:
                # Use image from now on to prevent unknown updates on current frame.
                frame = current_frame

                # print("Get-image: "+str(time.time() - prev_time))
                # Resize image
                hei_original, wid_original = frame.shape[0:2]

                if image_size[0] != hei_original or image_size[
                        1] != wid_original:
                    frame = cv2.resize(frame, (image_size[1], image_size[0]),
                                       interpolation=cv2.INTER_NEAREST)
                # Crop image
                frame = frame[top_cutoff:, :, :]

                # print("Preprocess0: "+str(time.time() - prev_time))

                # Convert color from OpenCV's default BGR to RGB
                img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                # print("Preprocess1: "+str(time.time() - prev_time))
                # Convert image to float16 and normalize so the device can process it
                img = img.astype(np.float16) / 255.0

                #print("Preprocess: "+str(time.time() - prev_time))
                prep_times.append(time.time() - prev_time)

                # Send image to the device, and Calculate d and phi
                graph.LoadTensor(img, 'user object')
                output, userobj = graph.GetResult()
                output = output.astype(np.float32)
                # Extract d and phi
                d = output[0]
                phi = output[1]
                print('Output(d,phi): ' + str(output))

                # If d or phi could not be found, stop the car.
                if d is None or phi is None:
                    # car.setWheelsSpeed(0,0)
                    prev_time = time.time()
                    continue

                # Calculate w(omega) and v from d and phi
                v, w = lane_controller(d, phi)
                #print(v,w)

                # Calculate inverse kinematics to find each wheel speed
                vel_right, vel_left = inverse_kinematics(v, w)
                # print("vel_left: " + str(vel_left) + ", vel_right: " + str(vel_right))

                # print("Predict: "+str(time.time() - prev_time))
                predict_times.append(time.time() - prev_time)

                # Drive the car
                car.setWheelsSpeed(vel_left, vel_right)
                # car.setWheelsSpeed(0,0)

                # Calculate passed time rate
                duration_rate = (time.time() - prev_time)
                iteration_times.append(duration_rate)
                prev_time = time.time()

                # Print the fps
                print("Rate: " + str(1.0 / duration_rate))

                # View for Debug
                if is_view:
                    frame = cv2.resize(frame, (640, 320))
                    cv2.imshow("Image-with-lines", frame)
                    if cv2.waitKey(1) != -1:
                        break
    except KeyboardInterrupt:
        print('Interrupted!')

    # Convert the timing lists to numpy arrays and compute averages
    iteration_times = np.array(iteration_times)
    avr_times = np.mean(iteration_times)

    prep_times = np.array(prep_times)
    avr_prep_times = np.mean(prep_times)

    predict_times = np.array(predict_times)
    avr_predict_times = np.mean(predict_times)

    # Print the average timings and loop rate
    print("Avg. (get image + preprocess) time: " + str(avr_prep_times * 1000) +
          ' ms')
    print("Avg. (prediction + motor speed control) time: " +
          str((avr_predict_times - avr_prep_times) * 1000) + ' ms')
    print("Avg. (set wheels speed) time: " +
          str((avr_times - avr_predict_times) * 1000) + ' ms')
    print("Avg. loop rate: " + str(1.0 / avr_times) + ' FPS')

    print('Shutting Down..')
    car.setWheelsSpeed(0, 0)
    # disconnect_camera(p,cam)

    # Clear and Disconnect Movidius device
    graph.DeallocateGraph()
    device.CloseDevice()
    print('Finished')
Exemple #25
        camera.vflip = True
        camera.hflip = True
        camera.resolution = (720, 720)
        camera.shutter_speed = 5000
        camera.iso = 800

        time.sleep(2)
        print('Initialized...')

        talk('init.wav')

        ncs_names = ncs.EnumerateDevices()
        if (len(ncs_names) < 1):
            print("Error - no NCS devices detected.")
            sys.exit(1)
        dev = ncs.Device(ncs_names[0])

        dev.OpenDevice()

        with open('inception_v3.graph', 'rb') as f:
            graph = dev.AllocateGraph(f.read())

        while True:
            start = time.time()
            print("start_time:", start)

            with tempfile.TemporaryDirectory() as temp_path:
                filepath = os.path.join(temp_path, 'image.jpg')
                snapshot(camera, filepath)

                elapsed_time_snapshot = time.time() - start
Exemple #26
def main111():
    # name of the opencv window
    cv_window_name = "SSD MobileNet - hit key 'q' to exit"

    # Get a list of ALL the sticks that are plugged in
    # we need at least one
    devices = mvnc.EnumerateDevices()
    if len(devices) == 0:
        print('No devices found')
        quit()

    # Pick the first stick to run the network
    device = mvnc.Device(devices[0])

    # Open the NCS
    device.OpenDevice()

    # The graph file that was created with the ncsdk compiler
    # graph_file_name = 'new_graph'

    # read in the graph file to memory buffer
    with open(graph_file_name, mode='rb') as f:
        graph_in_memory = f.read()

    # create the NCAPI graph instance from the memory buffer containing the graph file.
    graph = device.AllocateGraph(graph_in_memory)

    #####################################################
    # Create a VideoCapture object and read from the input file
    # If the input is the camera, pass 0 instead of the video file name

    #cap = cv2.VideoCapture('/data/darknet/video/192.168.10.66_01_20160923193103251.mp4')
    cap = cv2.VideoCapture(video_file)

    # Check if camera opened successfully
    if (not cap.isOpened()):
        print("Error opening video stream or file")

    # Read until video is completed
    while (cap.isOpened()):
        loop_start = time.time()
        # Capture frame-by-frame
        ret, frame = cap.read()
        if (not ret):
            # no more frames; the video is finished
            break

        # Display the resulting frame
        # cv2.imshow('Frame',frame)
        # run a single inference on the frame and overwrite the
        # boxes and labels
        time_start = time.time()
        run_inference(frame, graph)
        fps = 1 / (time.time() - time_start)
        print("FPS=" + str(fps))

        #cv2.HoughLinesP
        #cv2.namedWindow("SSD-Mobilenet",0);
        #cv2.resizeWindow("SSD-Mobilenet", 640, 480);
        #cv2.imshow(cv_window_name, frame)

        # display the results and wait for user to hit a key
        #cv2.waitKey(0)

        # Press Q on the keyboard to exit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
        loop_time = time.time() - loop_start
        print("loop_fps=" + str(1 / loop_time))

    # When everything is done, release the video capture object
    cap.release()

    # Closes all the frames
    cv2.destroyAllWindows()

    #####################################################
    # Clean up the graph and the device
    graph.DeallocateGraph()
    device.CloseDevice()
Exemple #27
def main():
    skip_classifier = True
    frame_interval = 1  # Number of frames after which to run face detection
    fps_display_interval = 10  # seconds
    frame_rate = 0
    frame_count = 0
    start_time = time.time()

    parser = get_parser()
    args = parser.parse_args()

    use_classifier = bool(args.classifier)

    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print('No devices found')
        quit()
    device = mvnc.Device(devices[0])
    device.open()

    print('Load PNET')

    pnets = []
    for r in parse_resolutions(args.resolutions):
        p = PNetHandler(device, r[0], r[1])
        pnets.append(p)

    print('Load RNET')

    with open('movidius/rnet.graph', mode='rb') as f:
        rgraphFileBuff = f.read()
    rnetGraph = mvnc.Graph("RNet Graph")
    rnetIn, rnetOut = rnetGraph.allocate_with_fifos(device, rgraphFileBuff)

    print('Load ONET')

    with open('movidius/onet.graph', mode='rb') as f:
        ographFileBuff = f.read()
    onetGraph = mvnc.Graph("ONet Graph")
    onetIn, onetOut = onetGraph.allocate_with_fifos(device, ographFileBuff)

    if use_classifier:
        print('Load FACENET')

        with open(args.graph, mode='rb') as f:
            fgraphFileBuff = f.read()
        fGraph = mvnc.Graph("Face Graph")
        fifoIn, fifoOut = fGraph.allocate_with_fifos(device, fgraphFileBuff)

        # Load classifier
        with open(args.classifier, 'rb') as f:
            opts = {'file': f}
            if six.PY3:
                opts['encoding'] = 'latin1'
            (model, class_names) = pickle.load(**opts)
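            # model is expected to expose predict_proba (used below);
            # class_names maps class indices to label strings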

    threshold = [0.6, 0.6, 0.7]  # thresholds for the three MTCNN stages (P-Net, R-Net, O-Net)

    bounding_boxes = []
    labels = []

    with tf.Session() as sess:
        pnets_proxy = []
        for p in pnets:
            pnets_proxy.append(p.proxy())

        def _rnet_proxy(img):
            rnetGraph.queue_inference_with_fifo_elem(rnetIn, rnetOut, img,
                                                     'rnet')
            output, userobj = rnetOut.read_elem()
            return output

        def _onet_proxy(img):
            onetGraph.queue_inference_with_fifo_elem(onetIn, onetOut, img,
                                                     'onet')
            output, userobj = onetOut.read_elem()
            return output

        pnets_proxy, rnet, onet = detect_face.create_movidius_mtcnn(
            sess, 'align', pnets_proxy, _rnet_proxy, _onet_proxy)
        frame_src = cv2.imread(args.image).astype(np.float32)
        if (frame_src.shape[1] != 640) or (frame_src.shape[0] != 480):
            frame_src = cv2.resize(frame_src, (640, 480),
                                   interpolation=cv2.INTER_AREA)
        try:
            while True:
                frame = frame_src.copy()
                # BGR -> RGB
                rgb_frame = frame[:, :, ::-1]
                # print("Frame {}".format(frame.shape))

                if (frame_count % frame_interval) == 0:
                    bounding_boxes, _ = detect_face.movidius_detect_face(
                        rgb_frame, pnets_proxy, rnet, onet, threshold)
                    detected = len(bounding_boxes) > 0

                    # Check our current fps
                    end_time = time.time()
                    if (end_time - start_time) > fps_display_interval:
                        frame_rate = float(frame_count) / (end_time -
                                                           start_time)
                        start_time = time.time()
                        frame_count = 0
                        if detected:
                            print('Full FPS: {}'.format(frame_rate))
                        else:
                            print('Pure FPS: {}'.format(frame_rate))
                if use_classifier:
                    imgs = get_images(rgb_frame, bounding_boxes)
                    labels = []
                    for img_idx, img in enumerate(imgs):
                        img = img.astype(np.float32)
                        fGraph.queue_inference_with_fifo_elem(
                            fifoIn, fifoOut, img, 'user object')
                        output, userobj = fifoOut.read_elem()
                        if not skip_classifier:
                            try:
                                output = output.reshape(1, model.shape_fit_[1])
                                predictions = model.predict_proba(output)
                            except ValueError as e:
                                # Can not reshape
                                print(
                                    "ERROR: Output from graph isn't consistent"
                                    " with the classifier model: %s" % e)
                                continue
                            best_class_indices = np.argmax(predictions, axis=1)
                            best_class_probabilities = predictions[
                                np.arange(len(best_class_indices)),
                                best_class_indices]

                            for i in range(len(best_class_indices)):
                                bb = bounding_boxes[img_idx].astype(int)
                                text = '%.1f%% %s' % (
                                    best_class_probabilities[i] * 100,
                                    class_names[best_class_indices[i]])
                                labels.append({
                                    'label': text,
                                    'left': bb[0],
                                    'top': bb[1] - 5
                                })

                add_overlays(frame,
                             bounding_boxes,
                             int(frame_rate),
                             labels=labels)

                frame_count += 1
                if args.image_out is not None:
                    cv2.imwrite(args.image_out, frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

        except (KeyboardInterrupt, SystemExit) as e:
            print('Caught %s: %s' % (e.__class__.__name__, e))

    if use_classifier:
        fifoIn.destroy()
        fifoOut.destroy()
        fGraph.destroy()
    rnetIn.destroy()
    rnetOut.destroy()
    rnetGraph.destroy()
    onetIn.destroy()
    onetOut.destroy()
    onetGraph.destroy()
    for p in pnets:
        p.destroy()
    device.close()
    print('Finished')
pnet_graph_filename = './p2838.graph'
onet_graph_filename = './o.graph'

with open(pnet_graph_filename, mode='rb') as rf:
    pgraphfile = rf.read()

with open(onet_graph_filename, mode='rb') as rf:
    ographfile = rf.read()

#mvnc.SetGlobalOption(mvnc.GlobalOption.LOGLEVEL, 2)
devices = mvnc.EnumerateDevices()
if len(devices) < 2:
    print('Not enough devices found')
    quit()

device = mvnc.Device(devices[0])
device.OpenDevice()
device1 = mvnc.Device(devices[1])
device1.OpenDevice()

pgraph = device.AllocateGraph(pgraphfile)
ograph = device1.AllocateGraph(ographfile)


def imresample(img, sz):
    # @UndefinedVariable
    im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA)
    return im_data


def detect_face(img, threshold=[0.405, 0.8473], factor=0.709):
Exemple #29
    box = Gtk.Box()
    box.set_spacing(5)
    box.set_orientation(Gtk.Orientation.VERTICAL)
    window.add(box)

    box.pack_start(viewsink, True, True, 0)
    output = OutputWidget()
    box.pack_start(output, False, True, 0)

    # Initialize Ncs device

    if verbose:
        print("Opening device")

    dev = fx.Device(args.dest)
    try:
        dev.OpenDevice()
    except Exception as e:
        print("Failed to open NCS. Is the device plugged in?")
        sys.exit(1)

    if verbose:
        print("Loading graph")

    graph = dev.AllocateGraph(network.get_graph_binary())

    # Open UI after device initialization

    if verbose:
        print("Opening UI")
def main():
    global time1
    global time2
    global time3
    global time4
    # name of the opencv window
    cv_window_name = "SSD MobileNet - hit any key to exit"

    # Get a list of ALL the sticks that are plugged in
    # we need at least one
    devices = mvnc.EnumerateDevices()
    if len(devices) == 0:
        print('No devices found')
        quit()

    # Pick the first stick to run the network
    device = mvnc.Device(devices[0])

    # Open the NCS
    device.OpenDevice()

    # The graph file that was created with the ncsdk compiler
    graph_file_name = cwd + '/../../models/caffe/MobileNetSSD_deploy.graph'

    # read in the graph file to memory buffer
    with open(graph_file_name, mode='rb') as f:
        graph_in_memory = f.read()

    # create the NCAPI graph instance from the memory buffer containing the graph file.
    graph = device.AllocateGraph(graph_in_memory)

    # read each image from disk, run an inference on it, and average the timings
    avg1 = 0
    avg2 = 0
    avg3 = 0
    for img in img_array:
        time1 = time.time()
        infer_image = cv2.imread(img)

        # run a single inference on the image and overwrite the
        # boxes and labels
        time2 = time.time()
        run_inference(infer_image, graph)
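        # time3 and time4 are globals, presumably updated inside run_inference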

        # display the results and wait for user to hit a key
        # cv2.imshow(cv_window_name, infer_image)
        # cv2.waitKey(0)

        # cv2.imwrite("nanga.jpg",infer_image)
        # print("Image read: "+str(time2-time1)+"\nInference: "+str(time3-time2)+"\nVisualization: "+str(time4-time3))
        avg1 += time2 - time1
        avg2 += time3 - time2
        avg3 += time4 - time3
    avg1 /= len(img_array)
    avg2 /= len(img_array)
    avg3 /= len(img_array)
    print("Image read: " + str(avg1) + "\nInference: " + str(avg2) +
          "\nVisualization: " + str(avg3))

    # Clean up the graph and the device
    graph.DeallocateGraph()
    device.CloseDevice()