Example 1
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    # im = load_image(image, 0, 0)
    t = time.time()
    im = array_to_image(image)
    print('array_to_image time: {}'.format(time.time() - t))
    dn.rgbgr_image(im)
    num = dn.c_int(0)
    pnum = dn.pointer(num)
    dn.predict_image(net, im)
    dets = dn.get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0,
                                pnum)
    num = pnum[0]
    if (nms): dn.do_nms_obj(dets, num, meta.classes, nms)

    t = time.time()
    res = []
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                res.append(
                    (meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))
    res = sorted(res, key=lambda x: -x[1])
    # free_image(im)
    # free_detections(dets, num)
    return res
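
The array_to_image() helper that these examples call is not shown on this page. Below is a minimal sketch, assuming the usual ctypes bindings exposed by darknet.py (dn.IMAGE, dn.POINTER, dn.c_float): it turns an OpenCV-style HxWxC uint8 array into darknet's planar float IMAGE. Some examples use a variant that returns only the IMAGE; this one also returns the backing array so Python does not free the buffer while darknet is still reading it.

import numpy as np
import darknet as dn

def array_to_image(arr):
    # HxWxC (BGR, uint8) -> CxHxW, normalized to [0, 1]
    arr = arr.transpose(2, 0, 1)
    c, h, w = arr.shape
    arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
    data = arr.ctypes.data_as(dn.POINTER(dn.c_float))
    im = dn.IMAGE(w, h, c, data)
    return im, arr  # keep arr referenced so the underlying buffer stays alive
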
def detect3(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    if isinstance(image, bytes):
        # image is a filename
        # i.e. image = b'/darknet/data/dog.jpg'
        im = load_image(image, 0, 0)
    else:
        # image is an nparray
        # i.e. image = cv2.imread('/darknet/data/dog.jpg')
        im, image = array_to_image(image)
        dn.rgbgr_image(im)
    num = dn.c_int(0)
    pnum = dn.pointer(num)
    dn.predict_image(net, im)
    dets = dn.get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0,
                                pnum)
    num = pnum[0]
    if nms: dn.do_nms_obj(dets, num, meta.classes, nms)

    res = []
    for j in range(num):
        a = dets[j].prob[0:meta.classes]
        if any(a):
            ai = np.array(a).nonzero()[0]
            for i in ai:
                b = dets[j].bbox
                res.append(
                    (meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))

    res = sorted(res, key=lambda x: -x[1])
    if isinstance(image, bytes): free_image(im)
    dn.free_detections(dets, num)
    return res
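
A hedged usage sketch for detect3() covering both accepted input types; the cfg/weights/data paths are placeholders, and load_net/load_meta take bytes on Python 3:

import cv2
import darknet as dn

net = dn.load_net(b"cfg/yolov3.cfg", b"yolov3.weights", 0)    # placeholder paths
meta = dn.load_meta(b"cfg/coco.data")

print(detect3(net, meta, b"data/dog.jpg"))                    # filename as bytes
print(detect3(net, meta, cv2.imread("data/dog.jpg")))         # OpenCV ndarray
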
Example 3
def detectOnImage(img, net, meta):
    cv_img = img.copy()
    im = array_to_image(cv_img)
    dn.rgbgr_image(im)

    r = detect2(net, meta, im)
    return r, cv_img
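
detect2() is called throughout these examples but never defined on this page. A hedged sketch: it mirrors detect() from Example 1 except that it takes an already-converted darknet IMAGE, so the caller is responsible for array_to_image()/rgbgr_image():

def detect2(net, meta, im, thresh=.5, hier_thresh=.5, nms=.45):
    num = dn.c_int(0)
    pnum = dn.pointer(num)
    dn.predict_image(net, im)
    dets = dn.get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0,
                                pnum)
    num = pnum[0]
    if nms:
        dn.do_nms_obj(dets, num, meta.classes, nms)

    res = []
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                res.append(
                    (meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))
    res = sorted(res, key=lambda x: -x[1])
    dn.free_detections(dets, num)
    return res
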
def slave_labor(frame):
    h, w, _ = frame.shape
    roi_array = []
    full_im, _ = darknet.array_to_image(frame)
    darknet.rgbgr_image(full_im)

    gpu_lock.acquire()
    if args.yolo:
        if w < h:
            spacing = int((h - w) / 2)
            roi_array = [(0, spacing, w, h - spacing)]
        else:
            spacing = int((w - h) / 2)
            roi_array = [(spacing, 0, w - spacing, h)]

    if not roi_array:
        roi_array = [(0, 0, w, h)]

    # TODO: remove frame_rois
    frame_rois = []

    nets = server_testing_internal['nets']
    plans = server_testing_internal['plans']
    groups = server_testing_internal['groups']
    metas = server_testing_internal['metas']

    results = []

    for i, _ in enumerate(nets):
        roi = roi_array[0]
        if args.yolo:
            # print(roi)
            frame_roi = frame[roi[1]: roi[3], roi[0]:roi[2]]
            frame_rois.append(frame_roi)
            if not args.socket and not args.interactive:
                cv.imshow("frame_roi", frame_roi)
        else:
            frame_roi = frame
        im, _ = darknet.array_to_image(frame_roi)
        darknet.rgbgr_image(im)
        r = lightnet.classify(nets[i], metas[i], im)

        top_k = args.top_k
        if top_k >= len(r):
            top_k = len(r)

        for rank in range(0, top_k):
            (label, score) = r[rank]
            results.append({
                'plan': plans[i], 
                'group': groups[i], 
                'predicate_sku': label,
                'score': score,
            })
    logger.info("|lightnet.classify")
    gpu_lock.release()

    return results
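
For concreteness, the square-crop arithmetic above: on a landscape 1280x720 frame, spacing = (1280 - 720) // 2 = 280, so roi_array holds (280, 0, 1000, 720) and frame[roi[1]:roi[3], roi[0]:roi[2]] keeps the central 720x720 region; portrait frames are cropped vertically in the same way.
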
Example 5
def slave_labor(frame):
    h, w, _ = frame.shape
    roi_array = []
    full_im, _ = darknet.array_to_image(frame)
    darknet.rgbgr_image(full_im)

    gpu_lock.acquire()
    if args.yolo:
        if w < h:
            spacing = int((h - w) / 2)
            roi_array = [(0, spacing, w, h - spacing)]
        else:
            spacing = int((w - h) / 2)
            roi_array = [(spacing, 0, w - spacing, h)]

    if not roi_array:
        roi_array = [(0, 0, w, h)]

    preds = []

    frame_rois = []

    for i, _ in enumerate(nets):
        results = [] # cross all rois
        for roi in roi_array:
            if args.yolo:
                # print(roi)
                frame_roi = frame[roi[1]: roi[3], roi[0]:roi[2]]
                frame_rois.append(frame_roi)
                if not args.socket and not args.interactive:
                    cv.imshow("frame_roi", frame_roi)
            else:
                frame_roi = frame
            im, _ = darknet.array_to_image(frame_roi)
            darknet.rgbgr_image(im)
            r = lightnet.classify(nets[i], metas[i], im) # for single roi

            results.extend(r)
        results = sorted(results, key=lambda x: -x[1])
        for rank in range(0, args.top_k):
            (label, score) = results[rank]
            preds.append({
                'plan': '100XGROUP', # TODO: remove hardcoding
                'group': args_groups[i], 
                'predicate_sku': label,
                'score': score,
            })
    logger.info("|lightnet.classify")
    gpu_lock.release()

    return preds
Example 6
def pipeline(img):
    # image data transform
    # img - cv image
    # im - yolo image
    im, image = dn.array_to_image(img)
    dn.rgbgr_image(im)

    tic = time.time()
    result = detect2(net, meta, im)
    toc = time.time()
    print(toc - tic, result)

    img_final = dn.draw_boxes(img, result)
    return img_final
Example 7
def pipeline(img):
    # image data transform
    # img - cv image
    # im - yolo image
    im = array_to_image(img)
    dn.rgbgr_image(im)

    tic = time.time()
    result = dn.detect1(net, meta, im)

    toc = time.time()
    # print('------------------------pipeline:',toc - tic, result)
    img_final = draw_boxes(img, result)
    # img_final = img
    return img_final, result
Example 8
 def process_image(self, image):
     with self._lock:
         height, width, depth = image.shape
         bytes_per_line = width * depth
         # Create darknet image from OpenCV image and convert from BGR to RGB
         dimg = darknet.data_to_image(image.ctypes, width, height, depth,
                                      bytes_per_line)
         # darknet.rgbgr_image(dimg)
         # Process image with darknet
         darknet.detector_demo_process_image(self.det, dimg)
         # Convert RGB to BGR and copy into original OpenCV image
         darknet.rgbgr_image(dimg)
         darknet.copy_image_into_data(dimg, image.ctypes, width, height,
                                      depth, bytes_per_line)
         del dimg
         return image
def main(session, memProxy, robot_ip, port, camera, yoloCFG, yoloWeights,
         yoloData):

    video_service = session.service("ALVideoDevice")
    resolution = vision_definitions.kQVGA  # kQVGA =320 * 240  ,kVGA =640x480
    colorSpace = vision_definitions.kRGBColorSpace

    imgClient = video_service.subscribe("_clienteMe", resolution, colorSpace,
                                        5)

    # Select camera.
    video_service.setParam(vision_definitions.kCameraSelectID, camera)

    # Darknet ...
    net = dn.load_net(yoloCFG, yoloWeights, 0)
    meta = dn.load_meta(yoloData)

    while True:

        # get image
        result = getImage(video_service, imgClient)

        if result == None:
            print 'cannot capture.'
        elif result[6] == None:
            print 'no image data string.'
        else:

            np_img = image_qi2np(result)
            cv_img = image_np2cv(np_img)

            # send cv image to dn
            im = array_to_image(cv_img)
            dn.rgbgr_image(im)

            r = detect2(net, meta, im)

            try:

                #insertData. Value can be int, float, list, string
                memProxy.insertData("detectedObjects", r)

            except RuntimeError, e:
                # catch exception
                print "error insert data", e
Example 10
    def __init__(self, **properties):
        super(SensorFactory, self).__init__(**properties)
        global frame, buffer_frames, buffer_index
        ret, frame = cap.read()
        im = dn.array_to_image(frame)
        dn.rgbgr_image(im)
        buffer_frames = [im] * 3
        buffer_index = 0

        self.number_frames = 0
        self.fps = 60
        self.duration = (
            1.0 / self.fps) * Gst.SECOND  # duration of a frame in nanoseconds
        self.launch_string = ('appsrc name=source is-live=true block=true format=GST_FORMAT_TIME ' \
                             'caps=video/x-raw,format=BGR,width=320,height=240,framerate={}/1 ' \
                             '! videoconvert ! video/x-raw,format=I420 ' \
                             '! x264enc speed-preset=ultrafast tune=zerolatency ' \
                             '! rtph264pay config-interval=1 name=pay0 pt=96').format(self.fps)
Example 11
    def identify(self, img):
        "Get image return identification"
        #print("Received image")
        r = []
        if self.isReady:
            if img == None:
                print 'Error: invalid capture.'
            elif img[6] == None:
                print 'Error: no image data string.'
            else:
                #print("Image is correct")
                np_img = self.image_qi2np(img)
                cv_img = self.image_np2cv(np_img)

                # send cv image to dn
                im = self.array_to_image(cv_img)
                dn.rgbgr_image(im)
                #print("Detecting")
                r = self.detect2(self.net, self.meta, im)
                #print("Detection done")
                self.lastImg = img
        return r
Example 12
 def rgbgr_image(img):
     dn.rgbgr_image(img)
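
OpenCV's imread/VideoCapture deliver BGR data while darknet expects RGB, which is why nearly every example follows array_to_image() with an in-place rgbgr_image() call. A minimal hedged usage, reusing the helper sketched after Example 1:

import cv2
import darknet as dn

arr = cv2.imread('data/dog.jpg')   # BGR, HxWx3 uint8
im, _ = array_to_image(arr)        # NumPy array -> darknet IMAGE
dn.rgbgr_image(im)                 # swap channels in place: BGR -> RGB
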
Example 13
 def detect(self, array, thresh):
     image = array_to_image(array)
     darknet.rgbgr_image(image)
     return detect2(self.net, self.meta, image, thresh)
    def yolo_detect(self, interval):

        time.sleep(interval)

        try:
            for i in range(24):
                self.cap.grab()

            ret, image = self.cap.read()

            if (cv2.countNonZero(cv2.cvtColor(image,
                                              cv2.COLOR_BGR2GRAY)) == 0):
                return False, "Resim Siyah"
            else:
                image_copy = image.copy()
                ##################################### Masking ############################################
                marginx = 215
                marginy = 0

                cv2.rectangle(image_copy, (0 + marginx, 0 + marginy),
                              (50 + marginx, 76 + marginy), (255, 0, 255), -1)
                cv2.rectangle(image_copy, (50 + marginx, 0 + marginy),
                              (100 + marginx, 65 + marginy), (255, 0, 255),
                              -1)  #CAM1
                cv2.rectangle(image_copy, (100 + marginx, 0 + marginy),
                              (150 + marginx, 50 + marginy), (255, 0, 255), -1)
                cv2.rectangle(image_copy, (150 + marginx, 0 + marginy),
                              (210 + marginx, 40 + marginy), (255, 0, 255), -1)

                cv2.rectangle(image_copy, (140, 420), (212, 480),
                              (255, 0, 255), -1)  #CAM7

                ##################################### Masking ############################################

                image_dn = self.array_to_image(image_copy)
                dn.rgbgr_image(image_dn)

                hour = datetime.datetime.now().hour
                dn.nnp_initialize()
                r = None

                #print("inceleniyor")

                if (hour > 7 and hour < 18):
                    r = dn.detect(self.net, self.meta, image_dn, 0.35)
                else:
                    r = dn.detect(self.net, self.meta, image_dn, 0.30)

                length = len(r)

                #print(r)

                flag = False
                if (length > 0):
                    self.DetectedImage = image
                for i in range(length):
                    if (str(r[i][0]).find("person") >= 0):
                        self.DetectedImage = cv2.rectangle(
                            self.DetectedImage,
                            (int(r[i][2][0]) - int(r[i][2][2]) - 5,
                             int(r[i][2][1]) - int(r[i][2][3]) - 5),
                            (int(r[i][2][0]) + int(r[i][2][2]) + 5,
                             int(r[i][2][1]) + int(r[i][2][3]) + 5),
                            (255, 0, 255), 2)
                        #print("Detected")
                        flag = True

                return flag, r
        except:
            #print("error")
            return False, "Usb Takili Degil"
Example 15
IMAGE_MODE = False

if __name__ == "__main__":
    lightnet.set_cwd(dir)

    net, meta = lightnet.load_network_meta("obj.cfg",
                                           "weights/obj_200.weights",
                                           "obj.data")
    # "../../bin/cfg/darknet19_448.cfg", "../../bin/darknet19_448.weights", "../../bin/cfg/imagenet1k.data")

    if IMAGE_MODE:
        if True:
            frame = cv.imread(lightnet.to_str('test.jpg'))
            im, arr = darknet.array_to_image(frame)
            darknet.rgbgr_image(im)
        else:
            im = darknet.load_image(
                lightnet.to_str('test.jpg').encode("ascii"), 0, 0)

        r = darknet.classify(net, meta, im)
        print(r)
    else:
        cap = cv.VideoCapture(0)
        if not cap.isOpened():
            raise Exception('Fail to open %s' % (0))
        while True:
            hasFrame, frame = cap.read()
            if not hasFrame:
                cv.waitKey()
                break
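
The capture loop above appears to be cut off; a hedged sketch of how the per-frame path could continue, mirroring the IMAGE_MODE branch (the classify call is taken from that branch, the window name and ESC handling are assumptions):

            # hypothetical continuation of the loop above
            im, arr = darknet.array_to_image(frame)
            darknet.rgbgr_image(im)
            r = darknet.classify(net, meta, im)
            print(r[0])                      # top-1 (label, score)
            cv.imshow('capture', frame)
            if cv.waitKey(1) == 27:          # exit on [ESC]
                break
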
Example 16
def main(session, robot_ip, port, camera, yoloCFG, yoloWeights, yoloData):

    video_service = session.service("ALVideoDevice")
    resolution = vision_definitions.kQVGA  # kQVGA =320 * 240  ,kVGA =640x480
    colorSpace = vision_definitions.kRGBColorSpace

    imgClient = video_service.subscribe("_clienteMe", resolution, colorSpace,
                                        5)

    # Select camera.
    video_service.setParam(vision_definitions.kCameraSelectID, camera)

    # Darknet ...
    net = dn.load_net(yoloCFG, yoloWeights, 0)
    meta = dn.load_meta(yoloData)

    #some constants for plotting
    font = cv2.FONT_HERSHEY_SIMPLEX

    while True:

        # get image
        result = getImage(video_service, imgClient)

        if result == None:
            print 'cannot capture.'
        elif result[6] == None:
            print 'no image data string.'
        else:

            np_img = image_qi2np(result)
            cv_img = image_np2cv(np_img)

            # send cv image to dn
            im = array_to_image(cv_img)
            dn.rgbgr_image(im)

            r = detect2(net, meta, im)
            print r
            print '..........................................'

            if r != []:
                cnt = 0
                colorList = colors(len(r))
                while cnt < len(r):
                    name = r[cnt][0]
                    predict = r[cnt][1]
                    print(name + ":" + str(predict))
                    x = r[cnt][2][0]
                    y = r[cnt][2][1]
                    w = r[cnt][2][2]
                    z = r[cnt][2][3]
                    #print (x, y, w, z)

                    # (x, y) is the box center and (w, z) its width/height;
                    # convert to the corner coordinates OpenCV expects
                    x_max = int(round((2 * x + w) / 2))
                    x_min = int(round((2 * x - w) / 2))
                    y_min = int(round((2 * y - z) / 2))
                    y_max = int(round((2 * y + z) / 2))
                    #print (x_min, y_min, x_max, y_max)
                    pixel_list = [x_min, y_min, x_max, y_max]
                    neg_index = [
                        pixel_list.index(val) for val in pixel_list if val < 0
                    ]
                    object_color = colorList[cnt]

                    name = name + " {:.2f}%".format(100 * predict)
                    cv2.rectangle(cv_img, (x_min, y_min), (x_max, y_max),
                                  (object_color), 2)
                    if neg_index == []:
                        cv2.rectangle(cv_img, (x_min, y_min - 24),
                                      (x_min + 10 * len(name), y_min),
                                      object_color, -1)
                        cv2.putText(cv_img, name, (x_min, y_min - 12), font,
                                    0.5, (0, 0, 0), 1, cv2.LINE_AA)
                    else:
                        if (y_min < 0 and x_min > 0):
                            cv2.rectangle(cv_img, (x_min, 0),
                                          (x_min + 10 * len(name), 24),
                                          object_color, -1)
                            cv2.putText(cv_img, name, (x_min, 12), font, 0.5,
                                        (0, 0, 0), 1, cv2.LINE_AA)
                        elif (x_min < 0 and y_min > 0):
                            cv2.rectangle(cv_img, (0, y_min - 24),
                                          (10 * len(name), y_min),
                                          object_color, -1)
                            cv2.putText(cv_img, name, (0, y_min - 12), font,
                                        0.5, (0, 0, 0), 1, cv2.LINE_AA)
                        elif (x_min < 0 and y_min < 0):
                            cv2.rectangle(cv_img, (0, 0), (10 * len(name), 24),
                                          object_color, -1)
                            cv2.putText(cv_img, name, (0, 12), font, 0.5,
                                        (0, 0, 0), 1, cv2.LINE_AA)
                    #cv2.imshow('image',cv_img)
                    #cropped = image.crop((x_min, y_min+20, x_max, y_max))
                    cnt += 1

            # show image
            cv2.imshow("pepper-camera", cv_img)
            #cv2.imwrite('pepper.png',cv_img)

        # exit by [ESC]
        if cv2.waitKey(33) == 27:
            break
Example 17
            if probs[j][i] > 0:
                res.append((meta.names[i], probs[j][i], (boxes[j].x, boxes[j].y, boxes[j].w, boxes[j].h)))
    res = sorted(res, key=lambda x: -x[1])
    dn.free_ptrs(dn.cast(probs, dn.POINTER(dn.c_void_p)), num)
    return res

import sys, os
sys.path.append(os.path.join(os.getcwd(), 'python/'))

import darknet as dn
import cv2
from scipy.misc import imread  # deprecated in newer SciPy; any image reader works

# Darknet
net = dn.load_net("cfg/tiny-yolo.cfg", "tiny-yolo.weights", 0)
meta = dn.load_meta("cfg/coco.data")
r = dn.detect(net, meta, "data/dog.jpg")
print r

# scipy
arr = imread('data/dog.jpg')
im = array_to_image(arr)
r = detect2(net, meta, im)
print r

# OpenCV
arr = cv2.imread('data/dog.jpg')
im = array_to_image(arr)
dn.rgbgr_image(im)
r = detect2(net, meta, im)
print r

Example 18
def read_frame():
    global frame
    ret, frame = cap.read()
    im = dn.array_to_image(frame)
    dn.rgbgr_image(im)
    buffer_frames[(buffer_index + 1) % 3] = im
Example 19
def detect(im):
    """im: opencv format image data"""
    im = darknet.array_to_image(im)
    darknet.rgbgr_image(im)
    return darknet.detect(net, meta, im)
Example 20
def slave_labor(frame):
    h, w, _ = frame.shape
    roi_array = []

    gpu_lock.acquire()
    if args.yolo:
        if w < h:
            spacing = int((h - w) / 2)
            roi_array = [(0, spacing, w, h - spacing)]
        else:
            spacing = int((w - h) / 2)
            roi_array = [(spacing, 0, w - spacing, h)]

    if not roi_array:
        roi_array = [(0, 0, w, h)]

    results_hier = []
    results_flat = []

    frame_rois = []

    for i, _ in enumerate(nets):
        results = []
        for roi in roi_array:
            if args.yolo:
                # print(roi)
                frame_roi = frame[roi[1]:roi[3], roi[0]:roi[2]]
                frame_rois.append(frame_roi)
                if not args.socket and not args.interactive:
                    cv.imshow("frame_roi", frame_roi)
            else:
                frame_roi = frame
            im, _ = darknet.array_to_image(frame_roi)
            darknet.rgbgr_image(im)
            r = lightnet.classify(nets[i], metas[i], im)

            results.extend(r)
            results_flat.extend(r)
            # results = sorted(results, key=lambda x: -x[1])
        results_hier.append(results)
    logger.info("|lightnet.classify")
    gpu_lock.release()

    results_flat = sorted(results_flat, key=lambda x: -x[1])
    top_k = args.top_k
    if top_k >= len(results_flat):
        top_k = len(results_flat)

    preds = []
    for rank in range(0, top_k):
        left = 10
        top = 20 + rank * 20
        (label, score) = results_flat[rank]
        if score >= args.threshold:
            preds.append((label, score))

        text = '%s %.2f%%' % (label, score * 100)
        labelSize, baseLine = cv.getTextSize(text, cv.FONT_HERSHEY_SIMPLEX,
                                             0.5, 1)
        back_clr = (222, 222, 222)
        if score > args.gold_confidence:
            back_clr = (122, 122, 255)
        cv.rectangle(frame, (left, top - labelSize[1]),
                     (left + labelSize[0], top + baseLine), back_clr,
                     cv.FILLED)

        cv.putText(frame, text, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.5,
                   (0, 0, 0))

    if args.socket:
        if args.debug:
            now = datetime.datetime.now()
            now_string = now.strftime("%Y-%h-%d-%H-%M-%S-%f")
            image_name = 'socket_debug' + '/' + now_string + '.jpg'
            cv.imwrite(image_name, frame)
            csv_file.write(image_name)
            for results in results_hier:
                top_k = 3
                for rank in range(0, top_k):
                    (label, score) = results[rank]
                    csv_file.write(',%s,%.3f' % (label[4:], score))
            csv_file.write('\n')
            csv_file.flush()

            logger.info("|csv_file.write")

    elif args.interactive:
        pass
    else:
        cv.imshow("output", frame)

    return preds
Example 21
    def yolo_detect(self, interval):

        time.sleep(interval)

        try:
            for i in range(24):
                self.cap.grab()

            ret, image = self.cap.read()

            cam1 = image[0:360, 0:640]
            cam2 = image[0:360, 640:1280]
            cam3 = image[0:360, 1280:1920]

            cam4 = image[360:720, 0:640]
            cam5 = image[360:720, 640:1280]
            cam6 = image[360:720, 1280:1920]

            cam7 = image[720:1080, 0:640]
            cam8 = image[720:1080, 640:1280]

            combined_images = np.zeros((720, 1280, 3), np.uint8)
            combined_images[0:360, 0:640] = cam1
            combined_images[0:360, 640:1280] = cam2
            combined_images[360:720, 0:640] = cam4
            combined_images[360:720, 640:1280] = cam6

            image = combined_images

            if (cv2.countNonZero(cv2.cvtColor(image,
                                              cv2.COLOR_BGR2GRAY)) == 0):
                return False, "Resim Siyah"
            else:
                image_copy = image.copy()

                ##################################### Masking ############################################
                cv2.rectangle(image_copy, (600, 300), (650, 370),
                              (255, 0, 255), -1)
                #marginx = 215
                #marginy = 0

                #cv2.rectangle(image_copy, (0+marginx,0+marginy), (50+marginx, 76+marginy), (255,0,255),-1)
                #cv2.rectangle(image_copy, (50+marginx,0+marginy), (100+marginx, 65+marginy), (255,0,255),-1)   #CAM1
                #cv2.rectangle(image_copy, (100+marginx,0+marginy), (150+marginx, 50+marginy), (255,0,255),-1)
                #cv2.rectangle(image_copy, (150+marginx,0+marginy), (210+marginx, 40+marginy), (255,0,255),-1)

                #cv2.rectangle(image_copy, (315+marginx,0+marginy), (360+marginx, 75+marginy), (255,0,255),-1) #CAM3 NEW

                #cv2.rectangle(image_copy, (140,420), (212, 480), (255,0,255),-1)  #CAM7

                ##################################### Masking ############################################

                image_dn = self.array_to_image(image_copy)
                dn.rgbgr_image(image_dn)

                hour = datetime.datetime.now().hour
                dn.nnp_initialize()
                r = None

                #print("inceleniyor")

                if (hour > 7 and hour < 18):
                    r = dn.detect(self.net, self.meta, image_dn, 0.55)
                else:
                    r = dn.detect(self.net, self.meta, image_dn, 0.45)

                length = len(r)

                #print(r)

                flag = False
                if (length > 0):
                    self.DetectedImage = image
                for i in range(length):
                    if (str(r[i][0]).find("person") >= 0):
                        self.DetectedImage = cv2.rectangle(
                            self.DetectedImage,
                            (int(r[i][2][0]) - int(r[i][2][2]) - 5,
                             int(r[i][2][1]) - int(r[i][2][3]) - 5),
                            (int(r[i][2][0]) + int(r[i][2][2]) + 5,
                             int(r[i][2][1]) + int(r[i][2][3]) + 5),
                            (255, 0, 255), 2)
                        #print("Detected")
                        flag = True

                return flag, r
        except:
            #print("error")
            return False, "Usb Takili Degil"
Example 22
                res.append((meta.names[i], probs[j][i],
                            (boxes[j].x, boxes[j].y, boxes[j].w, boxes[j].h)))
    res = sorted(res, key=lambda x: -x[1])
    dn.free_ptrs(dn.cast(probs, dn.POINTER(dn.c_void_p)), num)
    return res


import sys, os
sys.path.append(os.path.join(os.getcwd(), 'python/'))

import darknet as dn
import cv2
from scipy.misc import imread  # deprecated in newer SciPy; any image reader works

# Darknet
net = dn.load_net("cfg/tiny-yolo.cfg", "tiny-yolo.weights", 0)
meta = dn.load_meta("cfg/coco.data")
r = dn.detect(net, meta, "data/dog.jpg")
print(r)

# scipy
arr = imread('data/dog.jpg')
im = array_to_image(arr)
r = detect2(net, meta, im)
print(r)

# OpenCV
arr = cv2.imread('data/dog.jpg')
im = array_to_image(arr)
dn.rgbgr_image(im)
r = detect2(net, meta, im)
print(r)
def yolo_int(cv_img_array):
    im = array_to_image(cv_img_array)
    dn.rgbgr_image(im)
    toReturn = detect2(net, meta, im)
    return toReturn