Example #1
def main():
    vs=VideoStream(usePiCamera=True).start()
    time.sleep(1)
    print('Running NCS Caffe TinyYolo example')

    # Set logging level to only log errors
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print('No devices found')
        return 1
    device = mvnc.Device(devices[0])  # assign the first enumerated device
    device.open()

    #Load graph from disk and allocate graph via API
    with open(tiny_yolo_graph_file, mode='rb') as f:
        graph_from_disk = f.read()
    graph = mvnc.Graph("Tiny Yolo Graph")
    # Allocate the graph on the device and create its input and output FIFOs
    fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_from_disk)

    # Read a frame from the video stream, resize it to network width and height,
    # save a copy in display_image for display, then convert to float32,
    # normalize (divide by 255), and pass to queue_inference_with_fifo_elem as input for an inference
    
    while True:
        
        frame = vs.read()  # get a frame from the video stream
        input_image = frame.copy()  # copy the frame to an input image
        display_image = cv2.resize(input_image, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT), cv2.INTER_LINEAR)
        input_image = cv2.resize(input_image, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT), cv2.INTER_LINEAR)
        input_image = input_image.astype(np.float32)
        input_image = np.divide(input_image, 255.0)  # normalize pixel values to the 0-1 range
        input_image = input_image[:, :, ::-1]  # convert to RGB

        # Queue the image for inference and read the result from the NCS
        graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, input_image.astype(np.float32), 'user object')
        
        output, userobj = fifo_out.read_elem()

        # filter out all the objects/boxes that don't meet thresholds
        filtered_objs = filter_objects(output.astype(np.float32), input_image.shape[1], input_image.shape[0])

        print('Displaying image with objects detected in GUI')
        print('Click in the GUI window and hit any key to exit')
        #display the filtered objects/boxes in a GUI window
        display_objects_in_gui(display_image, filtered_objs)

    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    device.close()
    device.destroy()
    print('Finished')
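
For reference, a minimal sketch of the NCSDK 2.x flow the example above follows (enumerate a device, allocate a graph with FIFOs, queue one inference, clean up). The graph file name and input tensor below are placeholders, not part of the original example:

import numpy as np
from mvnc import mvncapi as mvnc

mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)  # errors only
devices = mvnc.enumerate_devices()
assert devices, 'No NCS devices found'
device = mvnc.Device(devices[0])
device.open()

with open('example.graph', mode='rb') as f:  # placeholder graph file
    graph_buffer = f.read()
graph = mvnc.Graph('example graph')
fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_buffer)

# placeholder input: a float32 tensor shaped for the network
image = np.zeros((448, 448, 3), dtype=np.float32)
graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, image, 'user object')
output, user_obj = fifo_out.read_elem()

fifo_in.destroy()
fifo_out.destroy()
graph.destroy()
device.close()
device.destroy()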
Example #2
def get_mvnc_device():
    mvncapi.global_set_option(mvncapi.GlobalOption.RW_LOG_LEVEL, 0)
    devices = mvncapi.enumerate_devices()
    if (len(devices) < 1):
        print("Error - no NCS devices detected")
        quit()
    dev = mvncapi.Device(devices[0])
    try:
        dev.open()
    except:
        print("Error - Could not open NCS device.")
        quit()
    return dev
def inferencer(results, frameBuffer):

    graph = None
    graphHandle0 = None
    graphHandle1 = None

    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 4)
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print("No NCS devices found")
        sys.exit(1)
    print(len(devices))

    with open(join(graph_folder, "graph"), mode="rb") as f:
        graph_buffer = f.read()
    graph = mvnc.Graph('MobileNet-SSD')

    devopen = False
    for devnum in range(len(devices)):
        try:
            device = mvnc.Device(devices[devnum])
            device.open()
            graphHandle0, graphHandle1 = graph.allocate_with_fifos(
                device, graph_buffer)
            devopen = True
            break
        except:
            continue

    if devopen == False:
        print("NCS Devices open Error!!!")
        sys.exit(1)

    print("Loaded Graphs!!! " + str(devnum))

    while True:
        try:
            if frameBuffer.empty():
                continue

            color_image = frameBuffer.get()
            prepimg = preprocess_image(color_image)
            graph.queue_inference_with_fifo_elem(graphHandle0, graphHandle1,
                                                 prepimg.astype(np.float32),
                                                 color_image)
            out, _ = graphHandle1.read_elem()
            results.put(out)
        except:
            import traceback
            traceback.print_exc()
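
The inferencer above reads frames from frameBuffer and pushes detections into results, so it is meant to run as a worker process. A hypothetical launch harness (the queue sizes and process setup are illustrative, not from the original):

import multiprocessing as mp

if __name__ == '__main__':
    results = mp.Queue()
    frameBuffer = mp.Queue(maxsize=10)
    worker = mp.Process(target=inferencer, args=(results, frameBuffer), daemon=True)
    worker.start()
    # a capture process would put frames into frameBuffer;
    # a display process would consume detections from results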
Example #4
def assign_ncs_devices():
    '''
    use the first NCS device for tiny YOLO processing, and the rest for GoogLeNet processing
    '''
    global ty_device

    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
    devices = mvnc.enumerate_devices()
    ty_device = mvnc.Device(devices[0])
    ty_device.open()

    if not init_gn_lists(devices[1:]):
        print('Error while initializing NCS devices for GoogLeNet')
    return True
def open_ncs_device(number_of_devices=1):
    # Configure the NCS verbosity reports
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
    # Look for enumerated NCS device(s); quit program if none found.
    device_list = mvnc.enumerate_devices()
    if len(device_list) < number_of_devices:
        messagebox.showerror("NCS Error", "Not enough devices available")
        return
    device1 = mvnc.Device(device_list[0])
    device1.open()
    if number_of_devices   == 1:
        return device1
    elif number_of_devices == 2:
        device2 = mvnc.Device(device_list[1])
        device2.open()
        return device1, device2
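
A quick usage sketch for open_ncs_device; note that it falls through and returns None when fewer devices are available than requested, so callers should check the result. Illustrative only:

device = open_ncs_device()   # one stick
pair = open_ncs_device(2)    # two sticks, if both are plugged in
if pair is not None:
    device1, device2 = pair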
Example #6
def assign_ncs_devices():
    '''
    use the first NCS device for tiny YOLO processing, and the rest for GoogLeNet processing
    '''
    global device

    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
    devices = mvnc.enumerate_devices()

    if len(devices) < 1:
        print('This application requires an NCS device.')
        print('Insert one and try again!')
        return 1

    device = mvnc.Device(devices[0])
    device.open()
Example #7
def init():
    global g_device
    global g_graph
    global g_fifo_in
    global g_fifo_out
    print('init start\n')
    # Set logging level to only log errors
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print('No devices found')
        return 1
    g_device = mvnc.Device(devices[0])
    g_device.open()

    #Load graph from disk and allocate graph via API
    with open(tiny_yolo_graph_file, mode='rb') as f:
        graph_from_disk = f.read()
    g_graph = mvnc.Graph("Tiny Yolo Graph")
    g_fifo_in, g_fifo_out = g_graph.allocate_with_fifos(g_device, graph_from_disk)
    print('init end')
Example #8
    def __init__(self, _gp, _dt, _iou, _lab):
        """
        Example arguments:
        GRAPH_PATH = "../files/main.graph"
        DETECTION_THRESHOLD = 0.20
        IOU_THRESHOLD = 0.20
        LABELS = {0: "bg", 1: "dandelion"}
        """

        self.GRAPH_PATH = _gp
        self.DETECTION_THRESHOLD = _dt
        self.IOU_THRESHOLD = _iou
        self.LABELS = _lab

        mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)

        # enumerate all devices
        devices = mvnc.enumerate_devices()
        if len(devices) == 0:
            print("No devices found") if DEBUG else 0
            quit()

        # use the first device found
        self.device = mvnc.Device(devices[0])
        # open the device
        self.device.open()

        # load the model from the disk
        with open(self.GRAPH_PATH, mode="rb") as f:
            graph_in_memory = f.read()

        self.graph = mvnc.Graph(self.GRAPH_PATH)

        # create the input and output fifos
        self.fifo_in, self.fifo_out = self.graph.allocate_with_fifos(
            self.device, graph_in_memory)

        self.cap = cv2.VideoCapture(0)
Example #9
def do_initialize():
    # Set logging level to only log errors

    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print('No devices found')
        return 1
    device = mvnc.Device(devices[0])
    device.open()

    graph_filename = "inference.graph"

    # Load graph file
    try:
        with open(graph_filename, mode='rb') as f:
            in_memory_graph = f.read()
    except OSError:
        print("Error reading graph file: " + graph_filename)
        return 1
    graph = mvnc.Graph("mnist graph")
    fifo_in, fifo_out = graph.allocate_with_fifos(device, in_memory_graph)

    return device, graph, fifo_in, fifo_out
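
do_initialize hands back everything needed to run an inference. A hedged usage sketch (error paths omitted); the 28x28 float32 input is an assumption based on the "mnist graph" name:

import numpy as np

device, graph, fifo_in, fifo_out = do_initialize()
digit = np.zeros((28, 28), dtype=np.float32)  # assumed MNIST-sized input
graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, digit, None)
output, user_obj = fifo_out.read_elem()
print('predicted digit:', int(np.argmax(output)))

fifo_in.destroy()
fifo_out.destroy()
graph.destroy()
device.close()
device.destroy()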
Example #10
    def __init__(self):
        # set the logging level for the NC API
        mvncapi.global_set_option(mvncapi.GlobalOption.RW_LOG_LEVEL,
                                  mvncapi.LogLevel.DEBUG)
        # get a list of names for all the devices plugged into the system
        device_list = mvncapi.enumerate_devices()
        if not device_list:
            raise Exception("Error - No neural compute devices detected.")

        else:
            print(len(device_list), "neural compute devices found!")

        # Create a Device instance for the first device found
        self._device = mvncapi.Device(device_list[0])
        # Open communication with the device
        # try to open the device.  this will throw an exception if someone else
        # has it open already
        try:
            self._device.open()
            print("Hello NCS! Device opened normally.")
        except Exception:
            raise Exception("Error - Could not open NCS device.")
def main():
    """Main function for the program.  Everything starts here.

    :return: None
    """
    global resize_output, resize_output_width, resize_output_height, \
           obj_detector_proc, video_proc, last_num_persons, asyncImWriter

    if (not handle_args()):
        print_usage()
        return 1

    # Set logging level to only log errors
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)

    devices = mvnc.enumerate_devices()
    if len(devices) < 1:
        print('No NCS device detected.')
        print('Insert device and try again!')
        return 1

    # Pick the first stick to run the network
    # use the first NCS device that opens for the object detection.
    dev_count = 0
    for one_device in devices:
        try:
            obj_detect_dev = mvnc.Device(one_device)
            obj_detect_dev.open()
            print("opened device " + str(dev_count))
            break
        except:
            print("Could not open device " + str(dev_count) +
                  ", trying next device")
            pass
        dev_count += 1

    cv2.namedWindow(cv_window_name)
    cv2.moveWindow(cv_window_name, 10, 10)
    cv2.waitKey(1)

    obj_detector_proc = SsdMobileNetProcessor(
        NETWORK_GRAPH_FILENAME,
        obj_detect_dev,
        inital_box_prob_thresh=min_score_percent / 100.0,
        classification_mask=object_classifications_mask)

    exit_app = False
    while (True):
        # video processor that will put video frames images on the object detector's input FIFO queue
        video_proc = CameraProcessor(CAMERA_INDEX,
                                     1920,
                                     1080,
                                     network_processor=obj_detector_proc)
        video_proc.start_processing()

        frame_count = 0
        start_time = time.time()
        end_time = start_time

        while (True):
            try:
                (filtered_objs, display_image
                 ) = obj_detector_proc.get_async_inference_result()
            except:
                print("exception caught in main")
                raise

            # check if the window is visible, this means the user hasn't closed
            # the window via the X button
            prop_val = cv2.getWindowProperty(cv_window_name,
                                             cv2.WND_PROP_ASPECT_RATIO)
            if (prop_val < 0.0):
                end_time = time.time()
                video_proc.stop_processing()
                exit_app = True
                break

            agg_results = overlay_on_image(display_image, filtered_objs)
            num_persons = len(agg_results)

            if (show_output):
                if (resize_output):
                    display_image = cv2.resize(
                        display_image,
                        (resize_output_width, resize_output_height),
                        cv2.INTER_LINEAR)
                cv2.imshow(cv_window_name, display_image)

                raw_key = cv2.waitKey(1)
                if (raw_key != -1):
                    if (handle_keys(raw_key, obj_detector_proc) == False):
                        end_time = time.time()
                        exit_app = True
                        video_proc.stop_processing()
                        break

            frame_count += 1

            # if (obj_detector_proc.is_input_queue_empty()):
            #     end_time = time.time()
            #     print('Neural Network Processor has nothing to process, assuming video is finished.')
            #     break

        frames_per_second = frame_count / (end_time - start_time)
        print('Frames per Second: ' + str(frames_per_second))

        throttling = obj_detect_dev.get_option(
            mvnc.DeviceOption.RO_THERMAL_THROTTLING_LEVEL)
        if (throttling > 0):
            print("\nDevice is throttling, level is: " + str(throttling))
            print("Sleeping for a few seconds....")
            cv2.waitKey(2000)

        #video_proc.stop_processing()
        cv2.waitKey(1)
        if (exit_app):
            video_proc.cleanup()
            break

    # Clean up the graph and the device
    obj_detector_proc.cleanup()
    obj_detect_dev.close()
    obj_detect_dev.destroy()

    cv2.destroyAllWindows()
Example #12
from mvnc import mvncapi as mvnc
import sys
import argparse
import os
import numpy as np
import matplotlib as mpl

mpl.use('Agg')
import matplotlib.pyplot as plt

mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, mvnc.LogLevel.DEBUG)


def process_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--graph", required=True, help="graph file")
    parser.add_argument("--input",
                        required=True,
                        help="path to a input bin file")
    parser.add_argument("--output_dir",
                        required=True,
                        help="where to put output file")

    return parser.parse_args()


def load_input_tensor(file):
    # Load raw float32 data and reshape to the network's input layout
    inputs = np.fromfile(file, dtype=np.float32)
    inputs = np.reshape(inputs, [2, 1024])
    return inputs
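
The example is cut off here; a hedged sketch of how the loaded tensor might be fed to the NCS, assuming the device, graph, and FIFOs were set up following the same pattern as the other examples:

# assumes device, graph, fifo_in, fifo_out already exist (see the examples above)
args = process_args()
inputs = load_input_tensor(args.input)
graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, inputs, None)
output, user_obj = fifo_out.read_elem()
output.tofile(os.path.join(args.output_dir, 'output.bin'))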
Example #13
def main():
    print("Loading system...")
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)

    # enumerate all devices
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print("No devices found")
        quit()

    # use the first device found
    device = mvnc.Device(devices[0])
    # open the device
    device.open()

    # load the model from the disk
    with open(GRAPH_PATH, mode="rb") as f:
        graph_in_memory = f.read()

    graph = mvnc.Graph(GRAPH_PATH)

    # create the input and output fifos
    fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_in_memory)

    cap = cv2.VideoCapture(0)

    print("Starting capture...")
    try:
        while True:
            global start_time
            start_time = time.time()

            # read an image in bgr format
            ret, img = cap.read()
            original_img = img

            # bgr input scaling
            img = np.divide(img, 255.0)
            resized_img = cv2.resize(img, (416, 416), cv2.INTER_LINEAR)

            # reverse the channel order from BGR to RGB
            resized_img = resized_img[:, :, ::-1]
            resized_img = resized_img.astype(np.float32)

            # make an inference
            graph.queue_inference_with_fifo_elem(fifo_in, fifo_out,
                                                 resized_img, "user object")
            # get the result
            output, userobj = fifo_out.read_elem()

            # Tiny Yolo V2 requires post processing to filter out duplicate objects and low score objects
            # After post processing, the app will display the image and any detected objects
            post_processing(output, original_img)
    except KeyboardInterrupt:
        print("Closing, please wait...")
        pass

    # clean up
    cv2.destroyAllWindows()
    cap.release()
    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    device.close()
    device.destroy()
    print("All done!")
Example #14
class ObjectWrapper():
    # open device
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)
    devices = mvnc.enumerate_devices()
    devNum = len(devices)  # there may be more than one
    print("Number of Movidius Sticks detected : ", devNum)
    if len(devices) == 0:
        print('No MVNC devices found')
        quit()
    devHandle = []  # used as device list - store devices
    graphHandle = []  # used as graph list - store graphs
    inputHandle = []
    outputHandle = []

    def __init__(self, graphfile):
        select = 1
        self.detector = YoloDetector(select)

        # loop over each detected device
        for i in range(ObjectWrapper.devNum):
            # append a Device instance for device i to the device handle list
            ObjectWrapper.devHandle.append(mvnc.Device(ObjectWrapper.devices[i]))
            ObjectWrapper.devHandle[i].open()  # open that device
            #opt = ObjectWrapper.devHandle[i].GetDeviceOption(mvnc.DeviceOption.OPTIMISATION_LIST)
            # load blob
            with open(graphfile, mode='rb') as f:
                blob = f.read()
            graph = mvnc.Graph('graph1')  # creates a graph instance

            # Allocate the graph and store to array
            #ObjectWrapper.graphHandle.append(graph.allocate(ObjectWrapper.devHandle[i], blob))
            input_fifo, output_fifo = graph.allocate_with_fifos(
                ObjectWrapper.devHandle[i], blob)

            ObjectWrapper.graphHandle.append(graph)
            ObjectWrapper.inputHandle.append(input_fifo)
            ObjectWrapper.outputHandle.append(output_fifo)

            self.dim = (416, 416)
            self.blockwd = 12
            self.wh = self.blockwd * self.blockwd
            self.targetBlockwd = 13
            self.classes = 1
            self.threshold = 0.2
            self.nms = 0.4

    #def __del__(self):

    def PrepareImage(self, img, dim):
        imgw = img.shape[1]
        imgh = img.shape[0]
        imgb = np.empty((dim[0], dim[1], 3))
        imgb.fill(0.5)

        if imgh / imgw > dim[1] / dim[0]:
            neww = int(imgw * dim[1] / imgh)
            newh = dim[1]
        else:
            newh = int(imgh * dim[0] / imgw)
            neww = dim[0]
        offx = int((dim[0] - neww) / 2)
        offy = int((dim[1] - newh) / 2)

        imgb[offy:offy + newh,
             offx:offx + neww, :] = resize(img.copy() / 255.0, (newh, neww), 1)
        im = imgb[:, :, (2, 1, 0)]
        return im, int(offx * imgw / neww), int(
            offy * imgh / newh), neww / dim[0], newh / dim[1]

    def Reshape(self, out, dim):
        shape = out.shape
        out = np.transpose(out.reshape(self.wh, int(shape[0] / self.wh)))
        out = out.reshape(shape)
        return out

    def Detect(self, img):
        print("DOING SINGLE DETECT")
        imgw = img.shape[1]
        imgh = img.shape[0]

        im, offx, offy, xscale, yscale = self.PrepareImage(img, self.dim)

        ####Edit
        ObjectWrapper.graphHandle[0].queue_inference_with_fifo_elem(
            ObjectWrapper.inputHandle[0], ObjectWrapper.outputHandle[0],
            im.astype(np.float32), 'user object')
        out, userobj = ObjectWrapper.outputHandle[0].read_elem()  # get result from output queue
        ####

        out = self.Reshape(out, self.dim)

        internalresults = self.detector.Detect(out.astype(np.float32),
                                               int(out.shape[0] / self.wh),
                                               self.blockwd, self.blockwd,
                                               self.classes, imgw, imgh,
                                               self.threshold, self.nms,
                                               self.targetBlockwd)
        pyresults = [
            BBox(x, xscale, yscale, offx, offy) for x in internalresults
        ]
        print(pyresults)
        return pyresults

    def Parallel(self, img):
        print("DOING PARALLEL")
        pyresults = {}
        for i in range(ObjectWrapper.devNum):
            im, offx, offy, w, h = self.PrepareImage(img[i], self.dim)
            # Edit
            ObjectWrapper.graphHandle[i].queue_inference_with_fifo_elem(
                ObjectWrapper.inputHandle[i], ObjectWrapper.outputHandle[i],
                im.astype(np.float32), 'user object')
            #ObjectWrapper.graphHandle[i].LoadTensor(im.astype(np.float16), 'user object')

        for i in range(ObjectWrapper.devNum):
            # Edit
            out, userobj = ObjectWrapper.outputHandle[i].read_elem()  # get result from output queue
            #out, userobj = ObjectWrapper.graphHandle[i].GetResult()

            out = self.Reshape(out, self.dim)
            imgw = img[i].shape[1]
            imgh = img[i].shape[0]
            internalresults = self.detector.Detect(out.astype(np.float32),
                                                   int(out.shape[0] / self.wh),
                                                   self.blockwd, self.blockwd,
                                                   self.classes, imgw, imgh,
                                                   self.threshold, self.nms,
                                                   self.targetBlockwd)
            res = [BBox(x, w, h, offx, offy) for x in internalresults]
            if i not in pyresults:
                pyresults[i] = res
        return pyresults
Example #15
class ObjectWrapper():
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)
    devices = mvnc.enumerate_devices()
    devNum = len(devices)
    if len(devices) == 0:
        print('No MVNC devices found')
        quit()
    devHandle = []
    graphHandle = []
    fifoInHandle = []
    fifoOutHandle = []

    def __init__(self, graphfile):
        select = 1
        self.detector = YoloDetector(select)
        for i in range(ObjectWrapper.devNum):
            ObjectWrapper.devHandle.append(
                mvnc.Device(ObjectWrapper.devices[i]))  ##------get devices
            ObjectWrapper.devHandle[i].open()  ##------ open device_i
            # load blob
            with open(graphfile, mode='rb') as f:
                blob = f.read()
            # create graph instance
            ObjectWrapper.graphHandle.append(mvnc.Graph('inst' + str(i)))
            # allocate resources
            fifoIn, fifoOut = ObjectWrapper.graphHandle[i].allocate_with_fifos(
                ObjectWrapper.devHandle[i], blob)
            ObjectWrapper.fifoInHandle.append(fifoIn)
            ObjectWrapper.fifoOutHandle.append(fifoOut)

        if (graphfile.endswith('416')):
            self.dim = (416, 416)
        elif (graphfile.endswith('288')):
            self.dim = (288, 288)
        elif (graphfile.endswith('352')):
            self.dim = (352, 352)
        else:
            self.dim = (416, 416)

        self.blockwd = int(self.dim[0] / 32)
        self.wh = self.blockwd * self.blockwd
        self.targetBlockwd = int(self.dim[0] / 32)
        self.classes = 6
        self.threshold = 0.35
        self.nms = 0.45

    def __del__(self):
        for i in range(ObjectWrapper.devNum):
            ObjectWrapper.fifoInHandle[i].destroy()
            ObjectWrapper.fifoOutHandle[i].destroy()
            ObjectWrapper.graphHandle[i].destroy()
            ObjectWrapper.devHandle[i].close()

    def PrepareImage(self, img, dim):
        '''
        imgw = img.shape[1]
        imgh = img.shape[0]
        imgb = np.empty((dim[0], dim[1], 3))
        imgb.fill(0.5)

        if imgh/imgw > dim[1]/dim[0]:
            neww = int(imgw * dim[1] / imgh)
            newh = dim[1]
        else:
            newh = int(imgh * dim[0] / imgw)
            neww = dim[0]
        offx = int((dim[0] - neww)/2)
        offy = int((dim[1] - newh)/2)

        imgb[offy:offy+newh,offx:offx+neww,:] = resize(img.copy()/255.0,(newh,neww),1)
        im = imgb[:,:,(2,1,0)]
        '''

        imgw = img.shape[1]
        imgh = img.shape[0]
        imgb = np.empty((dim[0], dim[1], 3))
        imgb.fill(0.5)

        #neww = 416
        #newh = 416
        neww = dim[0]
        newh = dim[1]

        offx = int((dim[0] - neww) / 2)
        offy = int((dim[1] - newh) / 2)
        start_time = datetime.now()
        imgb[offy:offy + newh,
             offx:offx + neww, :] = resize(img.copy() / 255.0, (newh, neww), 1)
        #imgb[offy:offy+newh,offx:offx+neww,:] = resize(img/255.0,(newh,neww),1)
        #imgb[offy:offy+newh,offx:offx+neww,:] = cv2.resize(img/255.0, (newh, neww))
        print(datetime.now() - start_time)
        im = imgb[:, :, (2, 1, 0)]
        #im = imgb

        return im, int(offx * imgw / neww), int(
            offy * imgh / newh), neww / dim[0], newh / dim[1]

    def Reshape(self, out, dim):
        shape = out.shape
        out = np.transpose(out.reshape(self.wh, int(shape[0] / self.wh)))
        out = out.reshape(shape)
        return out

    def non_max_suppress(self, predicts_dict, threshold=0.3):
        """
        Apply non-maximum suppression to predicted bounding boxes.
        Args:
            predicts_dict: {"stick": [[x1, y1, x2, y2, scores1], [...]]}.
            threshold: IOU threshold
        Return:
            predicts_dict processed by non-maximum suppression
        """
        # run NMS separately for each object class
        for object_name, bbox in predicts_dict.items():
            # gather the coordinates and confidences of every bounding box (bbx)
            # for the current class and compute each box's area
            bbox_array = np.array(bbox, dtype=np.float64)
            x1, y1, x2, y2, scores = (bbox_array[:, 0], bbox_array[:, 1],
                                      bbox_array[:, 2], bbox_array[:, 3],
                                      bbox_array[:, 4])
            areas = (x2 - x1 + 1) * (y2 - y1 + 1)
            # sort the boxes of the current class by confidence, high to low
            # (order holds the sorted indices)
            order = scores.argsort()[::-1]
            keep = []  # indices of the boxes that survive suppression
            # walk the boxes from highest to lowest confidence, removing every
            # box whose IOU with the kept box exceeds the threshold
            while order.size > 0:
                i = order[0]
                keep.append(i)  # keep the box with the highest remaining confidence
                # intersection corners of the current box with all remaining boxes
                # (when order.size == 1 these evaluate to empty arrays, which is harmless)
                xx1 = np.maximum(x1[i], x1[order[1:]])
                yy1 = np.maximum(y1[i], y1[order[1:]])
                xx2 = np.minimum(x2[i], x2[order[1:]])
                yy2 = np.minimum(y2[i], y2[order[1:]])
                inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(
                    0.0, yy2 - yy1 + 1)
                iou = inter / (areas[i] + areas[order[1:]] - inter)
                # indices of the boxes that survive this round; the +1 accounts
                # for the IOU vector not including the box's IOU with itself
                indexs = np.where(iou <= threshold)[0] + 1
                order = order[indexs]  # update the surviving indices
            bbox = bbox_array[keep]
            predicts_dict[object_name] = bbox.tolist()
        return predicts_dict

    def non_max_suppress_(self,
                          predicts_dict,
                          nms_tuple=(3, 5),
                          threshold=0.7):
        has_key1 = False
        has_key2 = False
        for key, value in predicts_dict.items():
            if key == nms_tuple[0]:
                has_key1 = True
            elif key == nms_tuple[1]:
                has_key2 = True
        if has_key1 and has_key2:
            bbx_array = np.array(predicts_dict[nms_tuple[1]], dtype=np.float64)
            x1, y1, x2, y2, scores = (bbx_array[:, 0], bbx_array[:, 1],
                                      bbx_array[:, 2], bbx_array[:, 3],
                                      bbx_array[:, 4])
            areas = (x2 - x1 + 1) * (y2 - y1 + 1)
            keep = []
            for bbx in predicts_dict[nms_tuple[0]]:
                xx1 = np.maximum(bbx[0], x1)
                yy1 = np.maximum(bbx[1], y1)
                xx2 = np.minimum(bbx[2], x2)
                yy2 = np.minimum(bbx[3], y2)
                inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(
                    0.0, yy2 - yy1 + 1)
                iou = inter / ((bbx[2] - bbx[0] + 1) *
                               (bbx[3] - bbx[1] + 1) + areas - inter)
                print('iou:{0}'.format(iou))
                # indices of the boxes that were not suppressed (relative to the iou vector)
                print('keep:{0}'.format(np.where(iou <= threshold)))
                indexs = np.where(iou > threshold)[0]  # indices retained for this box
                print('keep index:{0}'.format(indexs))
                keep.append(indexs)
                print('keep:{0}'.format(keep))
        #bbox = bbox_array[keep]
        #predicts_dict[object_name] = bbox.tolist()
        #predicts_dict = predicts_dict

    #return predicts_dict

    def Detect(self, img, idx=0):
        """Send image for inference on a single compute stick
           
            Args:
                img: openCV image type
                idx: index of the compute stick to use for inference
            Returns:
                [<BBox>]: array of BBox type objects for each result in the detection
        """
        imgw = img.shape[1]
        imgh = img.shape[0]

        im, offx, offy, xscale, yscale = self.PrepareImage(img, self.dim)
        #print('xscale = {}, yscale = {}'.format(xscale, yscale))

        ObjectWrapper.graphHandle[idx].queue_inference_with_fifo_elem(
            ObjectWrapper.fifoInHandle[idx], ObjectWrapper.fifoOutHandle[idx],
            im.astype(np.float32), 'user object')
        out, userobj = ObjectWrapper.fifoOutHandle[idx].read_elem()

        ###################################################################
        '''
        reshaped_out = out.reshape(13, 165, 13)
        transposed_out = np.transpose(reshaped_out, (2, 0, 1))
        '''
        reshaped_out = out.reshape(self.blockwd, 165, self.blockwd)
        transposed_out = np.transpose(reshaped_out, (2, 0, 1))

        ###################################################################

        transposed_out = transposed_out.reshape(165, self.blockwd,
                                                self.blockwd)
        first_132 = transposed_out[:132]
        first_132 = first_132.reshape(33, self.blockwd * 2, self.blockwd * 2)
        last_33 = transposed_out[132:]
        #print('layer23-conv:\n{0}'.format(first_132))
        #print('layer16-conv:\n{0}'.format(last_33))

        ###################################################################
        ###out = self.Reshape(out, self.dim)
        out1 = last_33.reshape(self.blockwd * self.blockwd * 33)
        internalresults1 = self.detector.Detect(out1.astype(np.float32), 33,
                                                self.blockwd, self.blockwd,
                                                self.classes, imgw, imgh,
                                                self.threshold, self.nms,
                                                self.targetBlockwd)
        pyresults1 = [
            BBox(x, xscale, yscale, offx, offy) for x in internalresults1
        ]

        out2 = first_132.reshape(self.blockwd * 2 * self.blockwd * 2 * 33)
        internalresults2 = self.detector.Detect(out2.astype(np.float32), 33,
                                                self.blockwd * 2,
                                                self.blockwd * 2, self.classes,
                                                imgw, imgh, self.threshold,
                                                self.nms, self.blockwd * 2)
        pyresults2 = [
            BBox(x, xscale, yscale, offx, offy) for x in internalresults2
        ]
        pyresults3 = pyresults1 + pyresults2

        #return pyresults3

        pre_dic = {}
        list_all = []
        for i in np.arange(6):
            list_temp = []
            for bbx in pyresults3:
                if (bbx.objType == i):
                    list_temp.append([
                        bbx.left, bbx.top, bbx.right, bbx.bottom,
                        bbx.confidence
                    ])
                    #print(list_temp)
            if (len(list_temp) == 0):
                continue
            else:
                pre_dic[i] = list_temp
            #--list_all.append(list_temp)
        #--list_key = np.arange(6)
        #--predict_dicts = dict(zip(list_key, list_all))
        #--print('predict_dicts:{0}'.format(predict_dicts))
        #print('pre_dic:{0}'.format(pre_dic))
        nms_pred_dict = self.non_max_suppress(pre_dic)
        #print('nmsed_dict:{0}'.format(nms_pred_dict))
        if nms_pred_dict is None:
            return []
        ##-------------------------test start
        #self.non_max_suppress_(nms_pred_dict)
        ##-------------------------test end

        nmsed_between_layer_results = []
        for object_id, bboxes in nms_pred_dict.items():
            #print('object_id:{0}'.format(object_id))
            #print('bboxes:{0}'.format(bboxes))
            for bbox in bboxes:
                bbox.append(object_id)
                #print('bbox:{0}'.format(bbox))
                BBox__ = BBox_(bbox, xscale, yscale, offx, offy)
                nmsed_between_layer_results.append(BBox__)

        return nmsed_between_layer_results
        '''out1 = last_33.reshape(13*13*33)
        #internalresults1 = self.detector.Detect(out1.astype(np.float32), 33, self.blockwd, self.blockwd, self.classes, imgw, imgh, self.threshold, self.nms, self.targetBlockwd)
        #pyresults1 = [BBox(x,xscale,yscale, offx, offy) for x in internalresults1]

        out2 = first_132.reshape(26*26*33)
        internalresults2 = self.detector.Detect(out2.astype(np.float32), 33, 26, 26, self.classes, imgw, imgh, self.threshold, self.nms, 26)
        pyresults2 = [BBox(x,xscale,yscale, offx, offy) for x in internalresults2]
        pyresults3 = pyresults1 + pyresults2
        return pyresults3'''

    def Parallel(self, img):
        """Send array of images for inference on multiple compute sticks
           
            Args:
                img: array of images to run inference on
           
            Returns:
                { <int>:[<BBox] }: A dict with key-value pairs mapped to compute stick device numbers and arrays of the detection boxs (BBox)
        """
        pyresults = {}
        for i in range(ObjectWrapper.devNum):
            im, offx, offy, w, h = self.PrepareImage(img[i], self.dim)
            ObjectWrapper.graphHandle[i].queue_inference_with_fifo_elem(
                ObjectWrapper.fifoInHandle[i], ObjectWrapper.fifoOutHandle[i],
                im.astype(np.float32), 'user object')
        for i in range(ObjectWrapper.devNum):
            out, userobj = ObjectWrapper.fifoOutHandle[i].read_elem()
            out = self.Reshape(out, self.dim)
            imgw = img[i].shape[1]
            imgh = img[i].shape[0]
            internalresults = self.detector.Detect(out.astype(np.float32),
                                                   int(out.shape[0] / self.wh),
                                                   self.blockwd, self.blockwd,
                                                   self.classes, imgw, imgh,
                                                   self.threshold, self.nms,
                                                   self.targetBlockwd)
            res = [BBox(x, w, h, offx, offy) for x in internalresults]
            if i not in pyresults:
                pyresults[i] = res
        return pyresults
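
A toy check of the non_max_suppress method above, with made-up boxes (illustrative only; wrapper stands for an ObjectWrapper instance):

# two heavily overlapping boxes plus one distant box; at the default IOU
# threshold of 0.3 the lower-confidence overlapping box is suppressed
boxes = {'stick': [[10, 10, 60, 60, 0.9],
                   [12, 12, 58, 58, 0.8],
                   [200, 200, 260, 260, 0.7]]}
kept = wrapper.non_max_suppress(boxes)
# kept['stick'] retains only the 0.9 and 0.7 boxes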
Example #16
def main():
    print('test start')
    func()
    return  # early return: everything below is unreachable as written
    print('Running NCS Caffe TinyYolo example')

    # Set logging level to only log errors
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print('No devices found')
        return 1
    device = mvnc.Device(devices[0])
    device.open()

    #Load graph from disk and allocate graph via API
    with open(tiny_yolo_graph_file, mode='rb') as f:
        graph_from_disk = f.read()
    graph = mvnc.Graph("Tiny Yolo Graph")
    fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_from_disk)

    # Read frames from the video file, resize to network width and height,
    # save a copy in display_image for display, then convert to float32,
    # normalize (divide by 255), and pass to queue_inference_with_fifo_elem as input for an inference
    cv_window_name='123'

    while (True):
        video_device = cv2.VideoCapture("./" + input_image_file)
        actual_frame_width = video_device.get(cv2.CAP_PROP_FRAME_WIDTH)
        actual_frame_height = video_device.get(cv2.CAP_PROP_FRAME_HEIGHT)
        print ('actual video resolution: ' + str(actual_frame_width) + ' x ' + str(actual_frame_height))
#        if ((video_device == None) or (not video_device.isOpened())):
#            print ('Could not open video device.  Make sure file exists:')
#            print ('file name:' + input_video_file)
#            print ('Also, if you installed python opencv via pip or pip3 you')
#            print ('need to uninstall it and install from source with -D WITH_V4L=ON')
#            print ('Use the provided script: install-opencv-from_source.sh')

       # frame_count = 0
       # start_time = time.time()

#frame_count = frame_count + 1

#frames_per_second = frame_count / (end_time - start_time)


        time1 = time.time()
        ret_val, input_image = video_device.read()
        time2 = time.time()

        if not ret_val:
            print("No image from video device, exiting")
            break

        start_time = time.time()

        display_image = input_image
        input_image = cv2.resize(input_image, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT), cv2.INTER_LINEAR)
        input_image = input_image.astype(np.float32)
        input_image = np.divide(input_image, 255.0)
        input_image = input_image[:, :, ::-1]  # convert to RGB

        time3  = time.time()
        # Load tensor and get result.  This executes the inference on the NCS
        graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, input_image.astype(np.float32), None)
        output, userobj = fifo_out.read_elem()

        # filter out all the objects/boxes that don't meet thresholds
        filtered_objs = filter_objects(output.astype(np.float32), input_image.shape[1], input_image.shape[0])

        print('Displaying image with objects detected in GUI')
        print('Click in the GUI window and hit any key to exit')
        #display the filtered objects/boxes in a GUI window
        display_objects_in_gui(display_image, filtered_objs)



        time4 = time.time()

        end_time = time.time()
        time_used = end_time - start_time
        print("time used : " + str(time_used) + " time points: " + str(time1) + " " + str(time2) + " " + str(time3) + " " + str(time4))
        cv2.imshow(cv_window_name, input_image)
        #raw_key = cv2.waitKey(1)


        # close video device
        video_device.release()

    input_image = cv2.imread(input_image_file)
    display_image = input_image



    input_image = cv2.resize(input_image, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT), cv2.INTER_LINEAR)
    input_image = input_image.astype(np.float32)
    input_image = np.divide(input_image, 255.0)
    input_image = input_image[:, :, ::-1]  # convert to RGB

    # Load tensor and get result.  This executes the inference on the NCS
    graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, input_image.astype(np.float32), None)
    output, userobj = fifo_out.read_elem()

    # filter out all the objects/boxes that don't meet thresholds
    filtered_objs = filter_objects(output.astype(np.float32), input_image.shape[1], input_image.shape[0])

    print('Displaying image with objects detected in GUI')
    print('Click in the GUI window and hit any key to exit')
    #display the filtered objects/boxes in a GUI window
    display_objects_in_gui(display_image, filtered_objs)

    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    device.close()
    device.destroy()
    print('Finished')
Example #17
def inferencer(results, frameBuffer, ssd_detection_mode, face_detection_mode, devnum, mp_active_stick_number, mp_stick_temperature):

    graphs = []
    graph_buffers = []
    graphHandles = []
    graphHandle0 = None
    graphHandle1 = None

    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 4)
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print("No NCS devices found")
        sys.exit(1)
    print(len(devices))

    # 1:= Enabled MobileNet-SSD Model
    if ssd_detection_mode == 1:
        with open(join(graph_folder, "graph"), mode="rb") as f:
            graph_buffers.append(f.read())
        graphs.append(mvnc.Graph('MobileNet-SSD'))

    # 1:= Enabled Fullweight FaceDetection Model
    if face_detection_mode == 1:
        with open(join(graph_folder, "graph.fullfacedetection"), mode="rb") as f:
            graph_buffers.append(f.read())
        graphs.append(mvnc.Graph('FullFaceDetection'))

    # 2:= Enabled Lightweight FaceDetection Model
    if face_detection_mode == 2:
        with open(join(graph_folder, "graph.shortfacedetection"), mode="rb") as f:
            graph_buffers.append(f.read())
        graphs.append(mvnc.Graph('ShortFaceDetection'))

    devopen = False
    for device in devices:
        try:
            device = mvnc.Device(device)
            device.open()
            for (graph, graph_buffer) in zip(graphs, graph_buffers):
                graphHandles.append(graph.allocate_with_fifos(device, graph_buffer))
            devopen = True
            break
        except:
            continue

    if devopen == False:
        print("NCS Devices open Error!!!")
        sys.exit(1)

    print("Loaded Graphs!!! ")

    THERMAL_STATS = mvnc.DeviceOption.RO_THERMAL_STATS
    temperature = device.get_option  # bind the getter; called below as temperature(THERMAL_STATS)

    while True:
        # 0:= Inactive stick, 1:= Active stick
        if mp_active_stick_number[devnum] == 0:
            continue

        # Measure the temperature inside the stick
        mp_stick_temperature[devnum] = temperature(THERMAL_STATS)[0]

        try:
            if frameBuffer.empty():
                continue

            color_image = frameBuffer.get()
            prepimg = preprocess_image(color_image)
            res = None
            for (graph, graphHandle) in zip(graphs, graphHandles):
                graphHandle0 = graphHandle[0]
                graphHandle1 = graphHandle[1]
                graph.queue_inference_with_fifo_elem(graphHandle0, graphHandle1, prepimg.astype(np.float32), None)
                out, _ = graphHandle1.read_elem()
                num_valid_boxes = int(out[0])
                if num_valid_boxes > 0:
                    if isinstance(res, type(None)):
                        res = [out]
                    else:
                        res = np.append(res, [out], axis=0)
            results.put(res)
        except:
            import traceback
            traceback.print_exc()
Example #18
def main():
    # Configure program arguments
    arg_parser = argparse.ArgumentParser(description='Run inference with Movidius')
    arg_parser.add_argument(
        '--graph-file',
        required=True,
        help='Movidius graph file',
    )
    arg_parser.add_argument(
        '--video-type',
        choices=['file', 'camera'],
        default='camera',
        help='video source type',
    )
    arg_parser.add_argument(
        '--source',
        default='/dev/video0',
        help='video source file',
    )
    arg_parser.add_argument(
        '--input-width',
        type=int,
        default=48,
        help='model input image width',
    )
    arg_parser.add_argument(
        '--input-height',
        type=int,
        default=48,
        help='model input image height',
    )
    arg_parser.add_argument(
        '--gui',
        action='store_true',
        help='enable the GUI display',
    )

    # Parse program arguments
    args = arg_parser.parse_args()
    assert args.input_width > 0 and args.input_height > 0

    # Set up the Movidius device
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)
    mvnc_devices = mvnc.enumerate_devices()

    if not mvnc_devices:
        print('No Movidius device found')
        exit(1)

    mvnc_dev = mvnc.Device(mvnc_devices[0])
    mvnc_dev.open()

    # Load the graph file
    try:
        with open(args.graph_file, mode='rb') as file_graph:
            graph_buffer = file_graph.read()
    except (FileNotFoundError, IOError):
        print('Failed to load the graph file')
        exit(1)

    graph = mvnc.Graph('graph')
    fifo_in, fifo_out = graph.allocate_with_fifos(mvnc_dev, graph_buffer)

    # Open the video source
    if args.video_type == 'file':
        video_dev = cv2.VideoCapture(args.source)
        video_width = video_dev.get(cv2.CAP_PROP_FRAME_WIDTH)
        video_height = video_dev.get(cv2.CAP_PROP_FRAME_HEIGHT)

    elif args.video_type == 'camera':
        video_dev = cv2.VideoCapture(0)
        video_dev = cv2.VideoCapture(0)

    # Main loop
    try:
        prev_timestamp = time.time()

        while True:
            ret, orig_image = video_dev.read()
            curr_time = time.localtime()

            # Check whether the stream has ended
            if ret is None or orig_image is None:
                break

            # Resize to the model input dimensions and scale values to the 0-1 range
            resized_image = cv2.resize(
                orig_image,
                (args.input_width, args.input_height),
            ).astype(np.float32)
            normalized_image = resized_image / 255.0

            # Run inference
            graph.queue_inference_with_fifo_elem(
                fifo_in,
                fifo_out,
                normalized_image,
                None,
            )
            result_onehot, _ = fifo_out.read_elem()

            left_score, right_score, stop_score, other_score = result_onehot
            class_id = np.argmax(result_onehot)

            if class_id == 0:
                class_str = 'left'
            elif class_id == 1:
                class_str = 'right'
            elif class_id == 2:
                class_str = 'stop'
            elif class_id == 3:
                class_str = 'other'

            # Measure elapsed time
            recent_timestamp = time.time()
            period = recent_timestamp - prev_timestamp
            prev_timestamp = recent_timestamp

            print('Time: %02d:%02d:%02d' %
                  (curr_time.tm_hour, curr_time.tm_min, curr_time.tm_sec))
            print('Output: %.2f %.2f %.2f %.2f' %
                  (left_score, right_score, stop_score, other_score))
            print('Class: %s' % class_str)
            print('Elapsed: %f' % period)
            print()

            # Display the image
            if args.gui:
                cv2.imshow('', orig_image)
                cv2.waitKey(1)

    except KeyboardInterrupt:
        print('Interrupted by user')

    # Release the video device
    video_dev.release()

    # Tear down the Movidius device
    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    mvnc_dev.close()
    mvnc_dev.destroy()
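
A hedged invocation of the script above (the file name predict.py is a placeholder):

python3 predict.py --graph-file model.graph --video-type camera --source /dev/video0 --gui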
Example #19
def main():
    print('Running NCS Caffe TinyYolo example')

    # Set logging level to only log errors
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print('No devices found')
        return 1
    device = mvnc.Device(devices[0])
    device.open()

    #Load graph from disk and allocate graph via API
    with open(tiny_yolo_graph_file, mode='rb') as f:
        graph_from_disk = f.read()
    graph = mvnc.Graph("Tiny Yolo Graph")
    fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_from_disk)

    #------------------------------
    # capture
    #------------------------------
    cap = cv2.VideoCapture(0)

    while True:
        ret, frame = cap.read()

        # Wait key
        key = cv2.waitKey(1)
        if key != -1:
            break

        # Take the camera frame, resize it to network width and height,
        # save a copy in display_image for display, then convert to float32,
        # normalize (divide by 255), and pass to queue_inference_with_fifo_elem as input for an inference
        #input_image = cv2.imread(input_image_file)
        input_image = frame
        display_image = input_image
        input_image = cv2.resize(input_image,
                                 (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT),
                                 cv2.INTER_LINEAR)
        input_image = input_image.astype(np.float32)
        input_image = np.divide(input_image, 255.0)
        input_image = input_image[:, :, ::-1]  # convert to RGB

        # Load tensor and get result.  This executes the inference on the NCS
        graph.queue_inference_with_fifo_elem(fifo_in, fifo_out,
                                             input_image.astype(np.float32),
                                             None)
        output, userobj = fifo_out.read_elem()

        # filter out all the objects/boxes that don't meet thresholds
        filtered_objs = filter_objects(output.astype(np.float32),
                                       input_image.shape[1],
                                       input_image.shape[0])

        print('Displaying image with objects detected in GUI')
        print('Click in the GUI window and hit any key to exit')
        #display the filtered objects/boxes in a GUI window
        display_objects_in_gui(display_image, filtered_objs)

        # Display
        cv2.imshow("window", display_image)

    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    device.close()
    device.destroy()
    print('Finished')

    # Clean up capture
    cap.release()
    cv2.destroyAllWindows()
Example #20
def main():
    vs=VideoStream(usePiCamera=True).start()
    time.sleep(1)
    # print('Running NCS Caffe TinyYolo example')


    i = 0

    # Set logging level to only log errors
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        #print('No devices found')
        return 1
    device = mvnc.Device(devices[0])  # use the first enumerated device
    device.open()

    #Load graph from disk and allocate graph via API
    with open(tiny_yolo_graph_file, mode='rb') as f:
        graph_from_disk = f.read()
    graph = mvnc.Graph("Tiny Yolo Graph")
    fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_from_disk)

    # Read a frame from the video stream, resize it to network width and height,
    # save a copy in display_image for display, then convert to float32,
    # normalize (divide by 255), and pass to queue_inference_with_fifo_elem as input for an inference
    prev_time = 0
    while True:

        frame = vs.read()  # get a frame from the video stream
        input_image = frame.copy()  # copy the frame to an input image
        display_image = cv2.resize(input_image, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT), cv2.INTER_LINEAR)
        input_image = cv2.resize(input_image, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT), cv2.INTER_LINEAR)
        input_image = input_image.astype(np.float32)
        input_image = input_image * (1.0 / 255.0)  # normalize pixel values to the 0-1 range
        input_image = input_image[:, :, ::-1]  # convert to RGB

        # Queue the image for inference and read the result from the NCS
        graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, input_image.astype(np.float32), None)
        output, userobj = fifo_out.read_elem()

        # filter out all the objects/boxes that don't meet thresholds
        filtered_objs = filter_objects(output.astype(np.float32), input_image.shape[1], input_image.shape[0])
        current_time = time.time()
        source_image_width = display_image.shape[1]
        source_image_height = display_image.shape[0]

        x_ratio = float(source_image_width) / NETWORK_IMAGE_WIDTH
        y_ratio = float(source_image_height) / NETWORK_IMAGE_HEIGHT
        if prev_time < current_time - 0.5:
            prev_time = current_time
            print('saving images')
            for obj_index in range(0, min(len(filtered_objs), 5)):
                print(obj_index)
                center_x = int(filtered_objs[obj_index][1] * x_ratio)
                center_y = int(filtered_objs[obj_index][2] * y_ratio)
                half_width = int(filtered_objs[obj_index][3] * x_ratio)//2
                half_height = int(filtered_objs[obj_index][4] * y_ratio)//2

                # calculate box (left, top) and (right, bottom) coordinates
                box_left = max(center_x - half_width, 0)
                box_top = max(center_y - half_height, 0)
                box_right = min(center_x + half_width, source_image_width)
                box_bottom = min(center_y + half_height, source_image_height)
                image = display_image[box_top:box_bottom, box_left:box_right]
                cv2.imwrite('task7_images/'+str(obj_index)+'.jpg', image)

        # print('Displaying image with objects detected in GUI')
        # print('Click in the GUI window and hit any key to exit')
        #display the filtered objects/boxes in a GUI window
        display_objects_in_gui(display_image, filtered_objs)

    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    device.close()
    device.destroy()
    print('Finished')
Example #21
def main():
    print('Running NCS Caffe TinyYolo example')

    # Set logging level to only log errors
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print('No devices found')
        return 1
    device = mvnc.Device(devices[0])
    device.open()

    #Load graph from disk and allocate graph via API
    with open(tiny_yolo_graph_file, mode='rb') as f:
        graph_from_disk = f.read()
    graph = mvnc.Graph("Tiny Yolo Graph")
    fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_from_disk)

    #------------------------------
    # capture
    #------------------------------
    cap = cv2.VideoCapture("cars.mp4")

    #------------------------------
    # pygame init
    #------------------------------
    pygame.init()
    screen = pygame.display.set_mode((640, 360))
    pygame.display.set_caption("TinyYolo VR")
    background = pygame.image.load("aquarium.jpg")
    player = pygame.image.load("jellyfish.png").convert_alpha()
    x_ratio = float(screen.get_width()) / NETWORK_IMAGE_WIDTH
    y_ratio = float(screen.get_height()) / NETWORK_IMAGE_HEIGHT

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Wait key
        key = cv2.waitKey(1)
        if key != -1:
            break

        # Resize the frame to network width and height, save a copy in
        # display_image for display, then convert to float32, normalize
        # (divide by 255), and reverse the channel order to RGB for inference
        #input_image = cv2.imread(input_image_file)
        input_image = frame
        display_image = input_image
        input_image = cv2.resize(input_image,
                                 (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT),
                                 cv2.INTER_LINEAR)
        input_image = input_image.astype(np.float32)
        input_image = np.divide(input_image, 255.0)
        input_image = input_image[:, :, ::-1]  # convert to RGB

        # Load tensor and get result.  This executes the inference on the NCS
        graph.queue_inference_with_fifo_elem(fifo_in, fifo_out,
                                             input_image.astype(np.float32),
                                             None)
        output, userobj = fifo_out.read_elem()

        # filter out all the objects/boxes that don't meet thresholds
        filtered_objs = filter_objects(output.astype(np.float32),
                                       input_image.shape[1],
                                       input_image.shape[0])

        print('Displaying image with objects detected in GUI')
        print('Click in the GUI window and hit any key to exit')
        #display the filtered objects/boxes in a GUI window
        display_objects_in_gui(display_image, filtered_objs)

        # Display
        cv2.imshow("window", display_image)

        #------------------------------
        # pygame draw
        #------------------------------
        #draw background
        screen.blit(background, (0, 0))

        #draw player
        for obj_index in range(len(filtered_objs)):
            center_x = int(filtered_objs[obj_index][1] * x_ratio)
            center_y = int(filtered_objs[obj_index][2] * y_ratio)
            screen.blit(player, (center_x - player.get_width() / 2,
                                 center_y - player.get_height() / 2))

        #update display
        pygame.display.flip()

    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    device.close()
    device.destroy()
    print('Finished')

    # Clean up capture
    cap.release()
    cv2.destroyAllWindows()
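The pygame overlay above maps detection centers from network image space to screen space with simple width/height ratios. A standalone sketch of that mapping, assuming TinyYolo's 448x448 network input (the real constants are defined elsewhere in the original program):

# map a detection center from network coordinates to screen coordinates
NETWORK_IMAGE_WIDTH = NETWORK_IMAGE_HEIGHT = 448  # assumed TinyYolo input size
screen_w, screen_h = 640, 360

x_ratio = float(screen_w) / NETWORK_IMAGE_WIDTH    # 640 / 448 ~ 1.43
y_ratio = float(screen_h) / NETWORK_IMAGE_HEIGHT   # 360 / 448 ~ 0.80

# a detection centered at (224, 224) in network space lands mid-screen
center_x = int(224 * x_ratio)  # 320
center_y = int(224 * y_ratio)  # 180
print(center_x, center_y)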
Exemplo n.º 22
def compare():
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)

    # enumerate all devices
    devices = mvnc.enumerate_devices()
    if len(devices) == 0:
        print('No devices found')
        quit()

    # use the first device found
    device = mvnc.Device(devices[0])
    # open the device
    device.open()

    # load the model from the disk
    with open(GRAPH_PATH, mode='rb') as f:
        graph_in_memory = f.read()

    graph = mvnc.Graph(GRAPH_PATH)
    # create the input and output fifos
    fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_in_memory)

    fps = 0.0
    # webcam mode if the argument is a device index, otherwise video file mode
    if args["video"].isdigit():
        args["video"] = int(args["video"])
    cap = cv2.VideoCapture(args["video"])
    if args["mode"] == "image":
        # read an image in bgr format
        img = cv2.imread(IMAGE_FROM_DISK)
        original_img = img

        # bgr input scaling
        img = np.divide(img, 255.0)
        resized_img = cv2.resize(img, (416, 416), cv2.INTER_LINEAR)

        # reverse the channel order from BGR to RGB
        resized_img = resized_img[:, :, ::-1]
        resized_img = resized_img.astype(np.float32)

        # make an inference
        graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, resized_img,
                                             'user object')
        # get the result
        output, userobj = fifo_out.read_elem()

        # Tiny Yolo V2 requires post processing to filter out duplicate objects and low score objects
        # After post processing, the app will display the image and any detected objects
        post_processing(output, original_img)

        cv2.imshow('Tiny Yolo V2', original_img)
        cv2.waitKey()

    while args["mode"] == "video":
        start = time.time()
        ret, display_image = cap.read()
        if not ret:
            print("No image found from source, exiting")
            break
        original_img = display_image

        # bgr input scaling
        display_image = np.divide(display_image, 255.0)
        resized_img = cv2.resize(display_image, (416, 416), cv2.INTER_LINEAR)

        # reverse the channel order from BGR to RGB
        resized_img = resized_img[:, :, ::-1]
        resized_img = resized_img.astype(np.float32)
        # make an inference
        graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, resized_img,
                                             'user object')
        # get the result
        output, userobj = fifo_out.read_elem()
        # Tiny Yolo V2 requires post processing to filter out duplicate objects and low score objects
        # After post processing, the app will display the image and any detected objects
        output_image = post_processing(output, original_img)

        fps = (fps + (1 / (time.time() - start))) / 2
        output_image = cv2.putText(output_image, "fps: {:.1f}".format(fps),
                                   (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                   (0, 0, 0), 1, 4)

        cv2.imshow(cv_window_name, output_image)

        if cv2.getWindowProperty(cv_window_name,
                                 cv2.WND_PROP_ASPECT_RATIO) < 0.0:
            print("Window closed")
            break
        elif cv2.waitKey(1) & 0xFF == ord('q'):
            print("Q pressed")
            break
    cap.release()
    cv2.destroyAllWindows()
    # clean up
    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    device.close()
    device.destroy()
    print("Finished")
Exemplo n.º 23
def main():
    # configure program arguments
    arg_parser = argparse.ArgumentParser(description='Line-following car program.')
    arg_parser.add_argument(
        '--model-file',
        required=True,
        help='Movidius model file',
    )
    arg_parser.add_argument(
        '--input-width',
        type=int,
        default=48,
        help='model input image width',
    )
    arg_parser.add_argument(
        '--input-height',
        type=int,
        default=48,
        help='model input image height',
    )

    # parse program arguments
    args = arg_parser.parse_args()
    assert args.input_width > 0 and args.input_height > 0

    # set up the Movidius device
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)
    mvnc_devices = mvnc.enumerate_devices()

    if not mvnc_devices:
        print('No Movidius device found')
        exit(1)

    mvnc_dev = mvnc.Device(mvnc_devices[0])
    mvnc_dev.open()

    # load the model file
    try:
        with open(args.model_file, mode='rb') as file_graph:
            graph_buffer = file_graph.read()
    except (FileNotFoundError, IOError):
        print('Could not load the model file')
        exit(1)

    graph = mvnc.Graph('graph')
    fifo_in, fifo_out = graph.allocate_with_fifos(mvnc_dev, graph_buffer)

    # open the video source
    video_dev = cv2.VideoCapture(0)

    # initialize GPIO
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PWM_PIN_left, GPIO.OUT)
    GPIO.setup(PWM_PIN_right, GPIO.OUT)
    pwm1 = GPIO.PWM(PWM_PIN_left, 500)
    pwm2 = GPIO.PWM(PWM_PIN_right, 500)
    pwm1.start(0)
    pwm2.start(0)

    GPIO.setup(IR_RIGHT_PIN, GPIO.IN)   # right IR sensor output
    GPIO.setup(IR_MIDDLE_PIN, GPIO.IN)  # middle IR sensor output
    GPIO.setup(IR_LEFT_PIN, GPIO.IN)    # left IR sensor output

    def recognize_image():

        # discard the first ten frames so the recognition result is not stale
        for i in range(10):
            image = video_dev.read()

        ret, orig_image = video_dev.read()
        assert ret

        # resize to the model input dimensions and scale values into 0..1
        resized_image = cv2.resize(
            orig_image,
            (args.input_width, args.input_height),
        ).astype(np.float32)
        normalized_image = resized_image / 255.0

        # run the prediction
        graph.queue_inference_with_fifo_elem(
            fifo_in,
            fifo_out,
            normalized_image,
            None,
        )
        result_onehot, _ = fifo_out.read_elem()
        class_id = np.argmax(result_onehot)
        left_score, right_score, stop_score, other_score = result_onehot

        print('Prediction: %.2f %.2f %.2f %.2f' %
              (left_score, right_score, stop_score, other_score))

        # print(result_onehot)
        if class_id == 0:
            return 'left'
        elif class_id == 1:
            return 'right'
        elif class_id == 2:
            return 'stop'
        elif class_id == 3:
            return 'other'

    def forward():
        pwm1.ChangeDutyCycle(DUTY_CYCLE)
        pwm2.ChangeDutyCycle(DUTY_CYCLE)

    def head_left():
        pwm1.ChangeDutyCycle(DUTY_CYCLE)
        pwm2.ChangeDutyCycle(0)

    def head_right():
        pwm1.ChangeDutyCycle(0)
        pwm2.ChangeDutyCycle(DUTY_CYCLE)

    def stop():
        pwm1.ChangeDutyCycle(0)
        pwm2.ChangeDutyCycle(0)

    def cross_left():
        time.sleep(1)

        pwm1.ChangeDutyCycle(100)
        pwm2.ChangeDutyCycle(0)
        time.sleep(0.35)

        pwm1.ChangeDutyCycle(0)
        pwm2.ChangeDutyCycle(0)
        time.sleep(1)

        pwm1.ChangeDutyCycle(100)
        pwm2.ChangeDutyCycle(100)
        time.sleep(1)

        pwm1.ChangeDutyCycle(0)
        pwm2.ChangeDutyCycle(0)
        time.sleep(0.5)

    def cross_right():
        time.sleep(1)

        pwm1.ChangeDutyCycle(0)
        pwm2.ChangeDutyCycle(100)
        time.sleep(0.35)

        pwm1.ChangeDutyCycle(0)
        pwm2.ChangeDutyCycle(0)
        time.sleep(1)

        pwm1.ChangeDutyCycle(100)
        pwm2.ChangeDutyCycle(100)
        time.sleep(1)

        pwm1.ChangeDutyCycle(0)
        pwm2.ChangeDutyCycle(0)
        time.sleep(0.5)

    def track_line():
        middle_val = GPIO.input(IR_MIDDLE_PIN)
        left_val = GPIO.input(IR_LEFT_PIN)
        right_val = GPIO.input(IR_RIGHT_PIN)
        print('IR sensors:', left_val, middle_val, right_val)

        if middle_val:
            if left_val and right_val:  # white white white
                return 'stop'
            elif left_val and not right_val:  # white white black
                return 'left'
            elif not left_val and right_val:  # black white white
                return 'right'
            else:
                return 'forward'  # black white black
        else:
            if left_val and right_val:  # white black white
                return 'stall'
            elif left_val and not right_val:  # white black black
                return 'left'
            elif not left_val and right_val:  # black black white
                return 'right'
            else:  # black black black
                return 'stall'

    try:
        while True:
            advice = track_line()

            if advice == 'left':
                print('Action:', 'turn left')
                head_left()

            elif advice == 'right':
                print('Action:', 'turn right')
                head_right()

            elif advice == 'stop':
                print('Action:', 'stop')
                stop()

                sign = recognize_image()

                if sign == 'left':
                    print('Image:', 'left-turn sign')
                    cross_left()

                elif sign == 'right':
                    print('Image:', 'right-turn sign')
                    cross_right()

                elif sign == 'stop':
                    print('Image:', 'stop sign')

                elif sign == 'other':
                    print('Image:', 'no sign')

            elif advice == 'forward':
                print('Action:', 'forward')
                forward()

            elif advice == 'stall':
                print('Action:', 'forward')
                forward()

            print()

    except KeyboardInterrupt:
        print('Interrupted by user')

    # stop the motors
    pwm1.stop()
    pwm2.stop()

    # release the video device
    video_dev.release()

    # shut down the Movidius device
    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    mvnc_dev.close()
    mvnc_dev.destroy()
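The if/elif chain in recognize_image maps the argmax class index to a label string. A tuple lookup expresses the same mapping more compactly; a sketch with the same four classes as the original:

import numpy as np

CLASS_NAMES = ('left', 'right', 'stop', 'other')

def class_of(result_onehot):
    # argmax over the four scores, then index into the label tuple
    return CLASS_NAMES[int(np.argmax(result_onehot))]

print(class_of([0.1, 0.7, 0.1, 0.1]))  # -> 'right'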
Exemplo n.º 24
#! /usr/bin/env python3

# Copyright(c) 2017 Intel Corporation.
# License: MIT See LICENSE file in root directory.

# Python script to open and close a single NCS device

import mvnc.mvncapi as fx

# main entry point for the program
if __name__ == "__main__":

    # set the logging level for the NC API
    # fx.SetGlobalOption(fx.GlobalOption.LOG_LEVEL, 0) # [aboutyou1219] NCSDK v1
    fx.global_set_option(fx.GlobalOption.RW_LOG_LEVEL, fx.LogLevel.DEBUG)

    # get a list of names for all the devices plugged into the system
    # ncs_names = fx.EnumerateDevices() # [aboutyou1219] NCSDK v1
    ncs_names = fx.enumerate_devices()

    if (len(ncs_names) < 1):
        print(
            "Error - no NCS devices detected, verify an NCS device is connected."
        )
        quit()

    # get the first NCS device by its name.  For this program we will always open the first NCS device.
    dev = fx.Device(ncs_names[0])

    # try to open the device.  this will throw an exception if someone else has it open already
    try:
        dev.open()
    except:
        print("Error - Could not open NCS device.")
        quit()

    # the device opened; close and clean it up
    dev.close()
    dev.destroy()
Exemplo n.º 25
import numpy

import mnist
import mvnc.mvncapi as fx

x_train = mnist.train_images()
y_train = mnist.train_labels()

x_test = mnist.test_images()
y_test = mnist.test_labels()

# Prepare test image
test_idx = numpy.random.randint(0, 10000)
test_image = x_test[test_idx]
test_image = test_image.astype('float32') / 255.0

# Using NCS Predict
# set the logging level for the NC API
fx.global_set_option(fx.GlobalOption.RW_LOG_LEVEL, 0)

# get a list of names for all the devices plugged into the system
devices = fx.enumerate_devices()
if (len(devices) < 1):
    print("Error - no NCS devices detected, verify an NCS device is connected.")
    quit()

# get the first NCS device by its name.  For this program we will always open the first NCS device.
dev = fx.Device(devices[0])

# try to open the device.  this will throw an exception if someone else has it open already
try:
    dev.open()
except:
    print("Error - Could not open NCS device.")
    quit()
Exemplo n.º 26
def main():
    """Main function for the program.  Everything starts here.

    :return: None
    """
    global resize_output, resize_output_width, resize_output_height, \
           device_count


    if (not handle_args()):
        print_usage()
        return 1

    # get list of all the .mp4 files in the image directory
    input_video_filename_list = os.listdir(input_video_path)
    input_video_filename_list = [i for i in input_video_filename_list if i.endswith('.mp4')]
    if (len(input_video_filename_list) < 1):
        # no images to show
        print('No video (.mp4) files found')
        return 1

    resting_image = cv2.imread("resting_image.png")
    if (resting_image is None):
        resting_image = numpy.zeros((800, 600, 3), numpy.uint8)

    if (resize_output):
        resting_image = cv2.resize(resting_image,
                                   (resize_output_width, resize_output_height),
                                   cv2.INTER_LINEAR)

    # Set logging level to only log errors
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)

    devices = mvnc.enumerate_devices()
    if len(devices) < 1:
        print('No NCS device detected.')
        print('Insert device and try again!')
        return 1

    if (device_count < 1) or (device_count > len(devices)):
        device_count = len(devices)


    # Create an object detector processor for each device that opens
    # and store it in our list of processors
    obj_detect_list = list()
    idle_obj_detect_list = list()

    device_number = 0

    for one_device in devices:
        try:
            obj_detect_dev = mvnc.Device(one_device)
            obj_detect_dev.open()
            print("opened device " + str(device_number))
            obj_detector_proc = Yolov2_tiny_Processor(NETWORK_GRAPH_FILENAME, obj_detect_dev,
                                                      inital_box_prob_thresh=min_score_percent / 100.0,
                                                      classification_mask=object_classifications_mask,
                                                      name="object detector " + str(device_number))
            if (device_number < device_count):
                obj_detect_list.append(obj_detector_proc)
            else:
                idle_obj_detect_list.append(obj_detector_proc)

            device_number += 1

        except:
            print("Could not open device " + str(device_number) + ", trying next device")
            pass


    if len(obj_detect_list) < 1:
        print('Could not open any NCS devices.')
        print('Reinsert devices and try again!')
        return 1

    print("Using " + str(len(obj_detect_list)) + " devices for object detection")
    print_hot_keys()

    cv2.namedWindow(cv_window_name)
    cv2.moveWindow(cv_window_name, 10,  10)
    cv2.waitKey(1)

    exit_app = False
    while (True):
        for input_video_file in input_video_filename_list :

            for one_obj_detect_proc in obj_detect_list:
                print("using object detector: " + one_obj_detect_proc.get_name())
                one_obj_detect_proc.drain_queues()

            # video processor that will put video frames images on the object detector's input FIFO queue
            video_proc = VideoProcessor(input_video_path + '/' + input_video_file,
                                        network_processor_list = obj_detect_list)
            video_proc.start_processing()

            frame_count = 0
            start_time = time.time()
            last_throttle_time = start_time
            end_time = start_time
            uptime = time.time()
            while(True):
                done = False
                for one_obj_detect_proc in obj_detect_list:
                    try:
                        (filtered_objs, display_image) = one_obj_detect_proc.get_async_inference_result()
                        print("resive result:",time.time()-uptime)
                        uptime=time.time()
                    except :
                        print("exception caught in main")
                        raise


                    # check if the window is visible, this means the user hasn't closed
                    # the window via the X button
                    prop_val = cv2.getWindowProperty(cv_window_name, cv2.WND_PROP_ASPECT_RATIO)
                    if (prop_val < 0.0):
                        end_time = time.time()
                        video_proc.stop_processing()
                        video_proc.cleanup()
                        exit_app = True
                        break

                    running_fps = frame_count / (time.time() - start_time)
                    overlay_on_image(display_image, filtered_objs, running_fps)
                    print("show time:",time.time()-uptime)

                    if (resize_output):
                        display_image = cv2.resize(display_image,
                                                   (resize_output_width, resize_output_height),
                                                   cv2.INTER_LINEAR)
                    cv2.imshow(cv_window_name, display_image)

                    raw_key = cv2.waitKey(1)
                    if (raw_key != -1):
                        if (handle_keys(raw_key, obj_detect_list) == False):
                            end_time = time.time()
                            exit_app = True
                            done = True
                            break

                    frame_count += 1

                    #if (one_obj_detect_proc.is_input_queue_empty()):
                    if (not video_proc.is_processing()):
                        # assume the video is over.
                        end_time = time.time()
                        done = True
                        print('video processor not processing, assuming video is finished.')
                        break

                #if (frame_count % 100) == 0:
                if ((time.time() - last_throttle_time) > throttle_check_seconds):
                    #long movie, check for throttling devices
                    # throttling = one_obj_detect_proc.get_device().get_option(mvnc.DeviceOption.RO_THERMAL_THROTTLING_LEVEL)
                    last_throttle_time = time.time()
                    print("movie not done, but going a long time so adjust for throttling")
                    video_proc.pause()
                    do_throttle_adjustment(obj_detect_list, idle_obj_detect_list)
                    video_proc.unpause()

                if (done) : break

            frames_per_second = frame_count / (end_time - start_time)
            print('Frames per Second: ' + str(frames_per_second))

            # check for throttling devices and save in throttling list
            throttling_list = list()
            for one_obj_detect_proc in obj_detect_list:
                throttling = one_obj_detect_proc.get_device().get_option(mvnc.DeviceOption.RO_THERMAL_THROTTLING_LEVEL)
                if (throttling > 0):
                    print("\nDevice " + one_obj_detect_proc.get_name() + " is throttling, level is: " + str(throttling))
                    throttling_list.append(one_obj_detect_proc)


            if (not exit_app):
                # rest between movies, display an image while resting
                resting_display_image = cv2.resize(resting_image,
                                                   (display_image.shape[1], display_image.shape[0]),
                                                   cv2.INTER_LINEAR)
                cv2.imshow(cv_window_name, resting_display_image)

                if ((len(throttling_list) > len(idle_obj_detect_list))):
                    # more devices throttling than we have in the idle list
                    # so do extra rest by applying a multiplier to the rest time
                    print("throttling devices... resting")
                    cv2.waitKey(rest_seconds * 1000 * rest_throttling_multiplier)
                else:
                    cv2.waitKey(rest_seconds * 1000)

            # remove the throttling devices from the main list and put them at the end so they will
            # be moved to the idle list with priority
            for one_throttling in throttling_list:
                obj_detect_list.remove(one_throttling)
                obj_detect_list.append(one_throttling)


            num_idle = len(idle_obj_detect_list)
            if (num_idle > len(obj_detect_list)):
                num_idle = len(obj_detect_list)
            if (num_idle > 0):
                # replace one of the devices with an idle device
                for idle_index in range(0, num_idle):
                    #for one_idle_proc in idle_obj_detect_list:
                    obj_detect_list.insert(0, idle_obj_detect_list.pop(0))

                for idle_count in range(0, num_idle):
                    idle_obj_detect_list.append(obj_detect_list.pop())

            video_proc.stop_processing()
            video_proc.cleanup()

            if (exit_app):
                break

        if (exit_app):
            break


    # Clean up the graph and the device
    for one_obj_detect_proc in obj_detect_list:
        cv2.waitKey(1)
        one_obj_detect_proc.cleanup(True)

    cv2.destroyAllWindows()
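The rest/idle rotation above keys off the RO_THERMAL_THROTTLING_LEVEL device option, where a nonzero value means the stick is thermally throttling (that is what the throttling > 0 check implies). A standalone sketch of reading the option for a single device:

import mvnc.mvncapi as mvnc

devices = mvnc.enumerate_devices()
if devices:
    device = mvnc.Device(devices[0])
    device.open()
    # nonzero means the stick is thermally throttling
    level = device.get_option(mvnc.DeviceOption.RO_THERMAL_THROTTLING_LEVEL)
    print('throttling level:', level)
    device.close()
    device.destroy()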
Exemplo n.º 27
def main():
    while True:
        vs=VideoStream(usePiCamera=True).start()
        time.sleep(1)
        print('Running NCS Caffe TinyYolo example')

        # Set logging level to only log errors
        mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3)
###########################################################################
        devices = mvnc.enumerate_devices()
        if len(devices) == 0:
            print('No devices found')
            return 1
###########################################################################
        device = mvnc.Device(devices[0]) 
        device.open()

        #Load graph from disk and allocate graph via API
        with open(tiny_yolo_graph_file, mode='rb') as f:
            graph_from_disk = f.read()
        graph = mvnc.Graph("Tiny Yolo Graph")
############################################################################
        fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_from_disk)

#       An alternative allocation that sets the FIFO types, data types, and depths explicitly:

#       fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_from_disk,
#           fifo_in_type=mvnc.FifoType.HOST_WO, fifo_in_data_type=mvnc.FifoDataType.FP32,
#       fifo_in_num_elem=2,
#           fifo_out_type=mvnc.FifoType.HOST_RO, fifo_out_data_type=mvnc.FifoDataType.FP32,
#       fifo_out_num_elem=2)

        # Grab a frame, resize it to network width and height, save a copy in
        # display_image for display, then convert to float32, normalize (divide
        # by 255), and reverse the channel order to RGB before queueing
    
        while True:
        
            frame = vs.read()  # get a frame from the video stream
            input_image = frame.copy()  # copy the frame to use as the network input
            display_image = cv2.resize(input_image, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT), cv2.INTER_LINEAR)
            input_image = cv2.resize(input_image, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT), cv2.INTER_LINEAR)
            input_image = input_image.astype(np.float32)
##############################################################################
            input_image = input_image * (1.0 / 255.0)  # normalize pixel values to 0..1
            input_image = input_image[:, :, ::-1]  # convert BGR to RGB

            # queue the image on the input FIFO for inference on the NCS
            graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, input_image, 'userobj')

            output, userobj = fifo_out.read_elem()

            # filter out all the objects/boxes that don't meet thresholds
            filtered_objs = filter_objects(output.astype(np.float32), input_image.shape[1], input_image.shape[0])

##############################################################
            save_box(filtered_objs, display_image)

            print('Displaying image with objects detected in GUI')
            print('Click in the GUI window and hit any key to exit')
            # display the filtered objects/boxes in a GUI window
            display_objects_in_gui(display_image, filtered_objs)

        fifo_in.destroy()
        fifo_out.destroy()
        graph.destroy()
        device.close()
        device.destroy()
        time.sleep(.5)
    print('Finished')
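save_box is not defined in this excerpt. A plausible implementation, following the crop-and-save loop from the first example above (the task7_images/ directory and the five-box cap are carried over as assumptions):

import cv2

def save_box(filtered_objs, display_image, out_dir='task7_images'):
    # Crop each detected box out of display_image and save it as a JPEG.
    # Assumes rows of [name, center_x, center_y, width, height, ...] in
    # display-image coordinates, as in the earlier example.
    h, w = display_image.shape[:2]
    for obj_index in range(min(len(filtered_objs), 5)):
        center_x = int(filtered_objs[obj_index][1])
        center_y = int(filtered_objs[obj_index][2])
        half_width = int(filtered_objs[obj_index][3]) // 2
        half_height = int(filtered_objs[obj_index][4]) // 2
        box_left = max(center_x - half_width, 0)
        box_top = max(center_y - half_height, 0)
        box_right = min(center_x + half_width, w)
        box_bottom = min(center_y + half_height, h)
        crop = display_image[box_top:box_bottom, box_left:box_right]
        cv2.imwrite(out_dir + '/' + str(obj_index) + '.jpg', crop)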
Exemplo n.º 28
class ObjectWrapper():
    mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)
    devices = mvnc.enumerate_devices()
    devNum = len(devices)
    if len(devices) == 0:
        print('No MVNC devices found')
        quit()
    devHandle = []
    graphHandle = []
    fifoInHandle = []
    fifoOutHandle = []

    def __init__(self, graphfile):
        select = 1
        self.detector = YoloDetector(select)
        for i in range(ObjectWrapper.devNum):
            ObjectWrapper.devHandle.append(
                mvnc.Device(ObjectWrapper.devices[i]))
            ObjectWrapper.devHandle[i].open()
            # load blob
            with open(graphfile, mode='rb') as f:
                blob = f.read()
            # create graph instance
            ObjectWrapper.graphHandle.append(mvnc.Graph('inst' + str(i)))
            # allocate resources
            fifoIn, fifoOut = ObjectWrapper.graphHandle[i].allocate_with_fifos(
                ObjectWrapper.devHandle[i], blob)
            ObjectWrapper.fifoInHandle.append(fifoIn)
            ObjectWrapper.fifoOutHandle.append(fifoOut)

        self.dim = (416, 416)
        self.blockwd = 12
        self.wh = self.blockwd * self.blockwd
        self.targetBlockwd = 13
        self.classes = 20
        self.threshold = 0.2
        self.nms = 0.4

    def __del__(self):
        for i in range(ObjectWrapper.devNum):
            ObjectWrapper.fifoInHandle[i].destroy()
            ObjectWrapper.fifoOutHandle[i].destroy()
            ObjectWrapper.graphHandle[i].destroy()
            ObjectWrapper.devHandle[i].close()

    def PrepareImage(self, img, dim):
        imgw = img.shape[1]
        imgh = img.shape[0]
        imgb = np.empty((dim[0], dim[1], 3))
        imgb.fill(0.5)

        if imgh / imgw > dim[1] / dim[0]:
            neww = int(imgw * dim[1] / imgh)
            newh = dim[1]
        else:
            newh = int(imgh * dim[0] / imgw)
            neww = dim[0]
        offx = int((dim[0] - neww) / 2)
        offy = int((dim[1] - newh) / 2)

        imgb[offy:offy + newh,
             offx:offx + neww, :] = resize(img.copy() / 255.0, (newh, neww), 1)
        im = imgb[:, :, (2, 1, 0)]
        return im, int(offx * imgw / neww), int(
            offy * imgh / newh), neww / dim[0], newh / dim[1]

    def Reshape(self, out, dim):
        # view the flat NCS output as (wh, channels), transpose so channels
        # vary slowest, then flatten back to the original shape
        shape = out.shape
        out = np.transpose(out.reshape(self.wh, int(shape[0] / self.wh)))
        out = out.reshape(shape)
        return out

    def Detect(self, img, idx=0):
        """Send image for inference on a single compute stick
           
            Args:
                img: openCV image type
                idx: index of the compute stick to use for inference
            Returns:
                [<BBox>]: array of BBox type objects for each result in the detection
        """
        imgw = img.shape[1]
        imgh = img.shape[0]

        im, offx, offy, xscale, yscale = self.PrepareImage(img, self.dim)
        #print('xscale = {}, yscale = {}'.format(xscale, yscale))

        ObjectWrapper.graphHandle[idx].queue_inference_with_fifo_elem(
            ObjectWrapper.fifoInHandle[idx], ObjectWrapper.fifoOutHandle[idx],
            im.astype(np.float32), 'user object')
        out, userobj = ObjectWrapper.fifoOutHandle[idx].read_elem()
        out = self.Reshape(out, self.dim)

        internalresults = self.detector.Detect(out.astype(np.float32),
                                               int(out.shape[0] / self.wh),
                                               self.blockwd, self.blockwd,
                                               self.classes, imgw, imgh,
                                               self.threshold, self.nms,
                                               self.targetBlockwd)
        pyresults = [
            BBox(x, xscale, yscale, offx, offy) for x in internalresults
        ]
        return pyresults

    def Parallel(self, img):
        """Send array of images for inference on multiple compute sticks
           
            Args:
                img: array of images to run inference on
           
            Returns:
                { <int>: [<BBox>] }: a dict keyed by compute stick device number, mapping to arrays of detection boxes (BBox)
        """
        pyresults = {}
        for i in range(ObjectWrapper.devNum):
            im, offx, offy, w, h = self.PrepareImage(img[i], self.dim)
            ObjectWrapper.graphHandle[i].queue_inference_with_fifo_elem(
                ObjectWrapper.fifoInHandle[i], ObjectWrapper.fifoOutHandle[i],
                im.astype(np.float32), 'user object')
        for i in range(ObjectWrapper.devNum):
            out, userobj = ObjectWrapper.fifoOutHandle[i].read_elem()
            out = self.Reshape(out, self.dim)
            imgw = img[i].shape[1]
            imgh = img[i].shape[0]
            internalresults = self.detector.Detect(out.astype(np.float32),
                                                   int(out.shape[0] / self.wh),
                                                   self.blockwd, self.blockwd,
                                                   self.classes, imgw, imgh,
                                                   self.threshold, self.nms,
                                                   self.targetBlockwd)
            res = [BBox(x, w, h, offx, offy) for x in internalresults]
            if i not in pyresults:
                pyresults[i] = res
        return pyresults
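A usage sketch for ObjectWrapper, assuming a hypothetical compiled graph file 'graph' and a hypothetical test image 'dog.jpg':

import cv2

detector = ObjectWrapper('graph')  # hypothetical graph file path
img = cv2.imread('dog.jpg')        # hypothetical test image
boxes = detector.Detect(img)       # run inference on the first stick
for box in boxes:
    print(box)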
Exemplo n.º 29
import os
import sys

import numpy

import mvnc.mvncapi as mvnc

dim = (227, 227)
EXAMPLES_BASE_DIR = '../../'

# ***************************************************************
# get labels
# ***************************************************************
labels_file = EXAMPLES_BASE_DIR + 'data/ilsvrc12/synset_words.txt'
labels = numpy.loadtxt(labels_file, str, delimiter='\t')

# ***************************************************************
# configure the NCS
# ***************************************************************
mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)

# ***************************************************************
# Get a list of ALL the sticks that are plugged in
# ***************************************************************
devices = mvnc.enumerate_devices()
if len(devices) == 0:
    print('No devices found')
    quit()

# ***************************************************************
# Pick the first stick to run the network
# ***************************************************************
device = mvnc.Device(devices[0])

# ***************************************************************
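The excerpt cuts off before the graph is loaded. The remaining classification steps follow the standard pattern; a sketch that reuses device, dim, and labels from above and assumes a hypothetical graph file 'graph' and input image 'image.jpg':

import cv2
import numpy

device.open()

with open('graph', mode='rb') as f:  # hypothetical compiled graph path
    graph_buffer = f.read()
graph = mvnc.Graph('classifier')
fifo_in, fifo_out = graph.allocate_with_fifos(device, graph_buffer)

img = cv2.imread('image.jpg')  # hypothetical input image
img = cv2.resize(img, dim, cv2.INTER_LINEAR).astype(numpy.float32) / 255.0
graph.queue_inference_with_fifo_elem(fifo_in, fifo_out, img, None)
output, userobj = fifo_out.read_elem()
print('prediction:', labels[int(numpy.argmax(output))])

fifo_in.destroy()
fifo_out.destroy()
graph.destroy()
device.close()
device.destroy()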
Exemplo n.º 30
    def __init__(self):
        super(MyMnistWindow, self).__init__()

        self.resize(284*2, 330*2)  # resize sets the window width and height
        self.move(100, 100)    # move sets the window position (where the capture area sits)
        self.setWindowIcon(QIcon('./logo.ico'))
        self.setWindowTitle('NCS Test - Handwritten Digit Recognition')
        self.setWindowFlags(Qt.FramelessWindowHint)  # frameless window
        # keep setMouseTracking False, otherwise mouse events fire even when no button is pressed
        self.setMouseTracking(False)

        self.pos_xy = []  # store the points the mouse has passed through

        # add the widgets
        self.label_draw = QLabel('', self)

        self.label_draw.setStyleSheet("QLabel{background:rgb(255,255,255)}")
        self.label_draw.setGeometry(2, 2, 550, 550) # (x,y,width,height)
        self.label_draw.setStyleSheet("QLabel{border:1px solid black;}")
        self.label_draw.setAlignment(Qt.AlignCenter)

        self.label_result_name = QLabel('Prediction:', self)
        self.label_result_name.setGeometry(2, 570, 61, 35)
        self.label_result_name.setAlignment(Qt.AlignCenter)

        self.label_result = QLabel(' ', self)
        self.label_result.setGeometry(64, 570, 35, 35)
        self.label_result.setFont(QFont("Roman times", 8, QFont.Bold))
        self.label_result.setStyleSheet("QLabel{border:1px solid black;}")
        self.label_result.setAlignment(Qt.AlignCenter)

        self.btn_recognize = QPushButton("Recognize", self)
        self.btn_recognize.setGeometry(110, 570, 50, 35)
        self.btn_recognize.clicked.connect(self.btn_recognize_on_clicked)

        self.btn_clear = QPushButton("Clear", self)
        self.btn_clear.setGeometry(170, 570, 50, 35)
        self.btn_clear.clicked.connect(self.btn_clear_on_clicked)

        self.btn_close = QPushButton("Close", self)
        self.btn_close.setGeometry(230, 570, 50, 35)
        self.btn_close.clicked.connect(self.btn_close_on_clicked)

        # inference timing
        self.label_time_name = QLabel('Inference time:', self)
        self.label_time_name.setGeometry(320, 570, 100, 35)
        self.label_time_name.setAlignment(Qt.AlignCenter)

        self.label_time = QLabel(' ', self)
        self.label_time.setGeometry(430, 570, 110, 35)
        self.label_time.setFont(QFont("Roman times", 8, QFont.Bold))
        self.label_time.setStyleSheet("QLabel{border:1px solid black;}")
        self.label_time.setAlignment(Qt.AlignCenter)

        # NCS device info
        self.label_ncs_name = QLabel('NCS status:', self)
        self.label_ncs_name.setGeometry(2, 610, 100, 35)
        self.label_ncs_name.setAlignment(Qt.AlignCenter)

        self.label_ncs = QLabel(' ', self)
        self.label_ncs.setGeometry(110, 610, 430, 35)
        self.label_ncs.setFont(QFont("Roman times", 8, QFont.Bold))
        self.label_ncs.setStyleSheet("QLabel{border:1px solid black;}")
        self.label_ncs.setAlignment(Qt.AlignCenter)

        # open the NCS device
        mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)
        # get the list of neural compute devices attached to the host
        self.devices = mvnc.enumerate_devices()

        if len(self.devices) == 0:
            print("[INFO] No NCS devices found!")
            raise RuntimeError("[Error] No devices found!")
            # quit()

        # use the first NCS device
        self.device = mvnc.Device(self.devices[0])
        print("[INFO] Opened NCS device id: " + str(self.devices[0]))
        # open communication with the device
        self.device.open()
        self.ncs_info = "NCS opened successfully, device ID: " + str(self.devices[0])

        # load the graph file
        with open(GRAPH_FILE, mode='rb') as f:
            self.graphFileBuff = f.read()

        # create a graph instance named "alexnet"
        self.graph = mvnc.Graph("alexnet")

        # create input and output FIFO queues and load the graph onto the device
        self.input_fifo, self.output_fifo = self.graph.allocate_with_fifos(self.device, self.graphFileBuff)
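The __init__ above only sets up the device, graph, and FIFOs; the recognize button's handler would push the drawn digit through them. A sketch of that inference step as a hypothetical helper method (run_inference is an illustrative name, and the input is assumed to be preprocessed to the compiled graph's input shape already):

import numpy as np

def run_inference(self, digit_image):
    # Sketch: classify one preprocessed digit image with the FIFOs from
    # __init__. digit_image is assumed to be float32, scaled to 0..1, and
    # shaped to match the graph's input layer.
    self.graph.queue_inference_with_fifo_elem(
        self.input_fifo, self.output_fifo,
        digit_image.astype(np.float32), None)
    output, userobj = self.output_fifo.read_elem()
    prediction = int(np.argmax(output))
    self.label_result.setText(str(prediction))  # show the predicted digit
    return prediction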