Example No. 1
def image_detection(image_path, network, class_names, class_colors, thresh):
    # Darknet doesn't accept numpy images.
    # Create a Darknet image buffer that we reuse for each detection
    width = darknet.network_width(network)
    height = darknet.network_height(network)
    darknet_image = darknet.make_image(width, height, 3)

    image = cv2.imread(image_path)
    original_width = image.shape[1]
    original_height = image.shape[0]
    x_scale = original_width / width
    y_scale = original_height / height
    image_rgb = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2RGB)
    image_resized = cv2.resize(image_rgb, (width, height),
                               interpolation=cv2.INTER_LINEAR)
    
    darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())
    detections = darknet.detect_image(network, class_names, darknet_image, thresh=thresh)
    darknet.free_image(darknet_image)  # release the reusable Darknet buffer
    if len(detections) == 0:
        return image, detections
    temp_detections = list()
    for i in range(len(detections)):
        x_detection = detections[i][2][0] * x_scale
        y_detection = detections[i][2][1] * y_scale
        w_detection = detections[i][2][2] * x_scale
        h_detection = detections[i][2][3] * y_scale
        temp_detections.append([detections[i][0], detections[i][1], (x_detection, y_detection, w_detection, h_detection)])
    detections = temp_detections
    image = darknet.draw_boxes(detections, image, class_colors)
    return image, detections
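A minimal usage sketch for the function above. The config, data and weights paths are placeholders; load_network is the standard darknet.py helper used throughout these examples.
import cv2
import darknet

# Hypothetical paths -- adjust to your own model files.
network, class_names, class_colors = darknet.load_network(
    "cfg/yolov4.cfg", "cfg/coco.data", "yolov4.weights", batch_size=1)

annotated, detections = image_detection(
    "data/dog.jpg", network, class_names, class_colors, thresh=0.25)
for label, confidence, bbox in detections:
    print(label, confidence, bbox)  # bbox is centre-format (x, y, w, h), scaled to the original image
cv2.imwrite("prediction.jpg", annotated)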
Example No. 2
def inference(darknet_image_queue, detections_queue, fps_queue):
    frame_counter = 0
    output_detections = []
    while cap.isOpened():
        darknet_image = darknet_image_queue.get()
        # height, width, channels = darknet_image.size
        frame_counter += 1
        prev_time = time.time()
        detections = darknet.detect_image(network,
                                          class_names,
                                          darknet_image,
                                          thresh=args.thresh)
        detections_queue.put(detections)
        for label, confidence, bbox in detections:
            x, y, w, h = bbox
            # csv_writer.writerow([x,y,frame_counter])
            output_detections.append([x, y, frame_counter])
        # tagpath()
        # visualizepath.visualize(height, width)
        fps = int(1 / (time.time() - prev_time))
        fps_queue.put(fps)
        print("FPS: {}".format(fps))
        # darknet.print_detections(detections, args.ext_output)
        # darknet.print_detections_into_csv(detections, file, frame_counter, args.ext_output)
        darknet.free_image(darknet_image)
    output_detections = np.asarray(output_detections)
    np.savetxt(f, output_detections, delimiter=",")
    cap.release()
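The worker above consumes darknet_image_queue; a minimal sketch of the companion capture thread that would feed it, following the same darknet_video.py pattern (cap, width, height and frame_queue are assumed to be module-level globals here):
def video_capture(frame_queue, darknet_image_queue):
    # Producer side: read frames, resize to the network input size and queue them.
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb, (width, height),
                                   interpolation=cv2.INTER_LINEAR)
        frame_queue.put(frame_resized)
        img_for_detect = darknet.make_image(width, height, 3)
        darknet.copy_image_from_bytes(img_for_detect, frame_resized.tobytes())
        darknet_image_queue.put(img_for_detect)
    cap.release()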
Example No. 3
def inference(darknet_image_queue, detections_queue, fps_queue):
    """
    inference the captures into the darknet.
    function is also in charge of the printing/writing (fps, caputre time, detections).
    """
    # results log
    logname = args.export_logname
    """ OPTION: """
    # each time will open a new txt file
    logname_split = args.export_logname.rsplit(".", 1)
    index = 0
    while 1:
        # name_<index>.txt
        logname = logname_split[0] + '_' + str(index) + '.' + logname_split[1]
        # file does not exist yet -> use this name
        if not os.path.isfile(logname):
            break
        # otherwise try the next index
        index += 1
        """ OPTION: END """
    f = open(logname, "w")
    enter_time_queue = [0, 0, 0]
    exit_time_queue = [1, 1, 1]
    while cap.isOpened():
        # get new image from queue
        darknet_image = darknet_image_queue.get()
        # sample entering time
        prev_time = time.time()
        enter_time_queue.pop(0)
        enter_time_queue.append(prev_time)
        # detect image (inference image in neural network)
        detections = darknet.detect_image(network,
                                          class_names,
                                          darknet_image,
                                          thresh=args.thresh)
        # store result in queue
        detections_queue.put(detections)
        # calculate fps of passing image
        fps = float(1 / (time.time() - prev_time))
        exit_time_queue.pop(0)
        exit_time_queue.append(time.time())
        # store fps in queue
        fps_queue.put(int(fps))
        # calculate the average FPS over the last 3 frames (just for monitoring)
        fps_list = [
            1. / (m - n) for m, n in zip(exit_time_queue, enter_time_queue)
        ]
        print("Average FPS over last 3 frames is: {:.2f}".format(
            mean(fps_list)))
        # store capture time to file (in ms, for ground station)
        f.write("time: {}\n".format(str(round(capture_time_queue.get() *
                                              1000))))
        # store bbox to file
        height_ratio = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) / height
        width_ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / width
        darknet.print_detections(detections, height_ratio, width_ratio, f)
        f.write("\n")
    cap.release()
    f.close()
    print("\nFinished successfully, results: {}".format(logname))
Example No. 4
    def set_frame(self):
        """Sets pixmap image to video frame"""

        if not self.online:
            self.spin(1)
            return

        if self.deque and self.online:
            # Grab latest frame
            frame = self.deque[-1]
            frame_resized = cv2.resize(frame,(540, 960),interpolation=cv2.INTER_LINEAR)
            darknet.copy_image_from_bytes(darknet_image,frame_resized.tobytes())
            detections = darknet.detect_image(netMain, metaMain, darknet_image, thresh=0.25)
            frame = self.cvDrawBoxes(detections, frame_resized)
            #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            #ret, jpeg = cv2.imencode('.jpg', image)
        # Keep frame aspect ratio
        if self.maintain_aspect_ratio:
            self.frame = imutils.resize(frame, width=self.screen_width)
        else:
            # Force resize to the exact screen dimensions
            self.frame = cv2.resize(frame, (self.screen_width, self.screen_height))

        # Add timestamp to cameras
        cv2.rectangle(self.frame, (self.screen_width-190,0), (self.screen_width,50), color=(0,0,0), thickness=-1)
        cv2.putText(self.frame, datetime.now().strftime('%H:%M:%S'), (self.screen_width-185,37), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255,255,255), lineType=cv2.LINE_AA)

        # Convert to pixmap and set to video frame
        self.img = QtGui.QImage(self.frame, self.frame.shape[1], self.frame.shape[0], QtGui.QImage.Format_RGB888).rgbSwapped()
        self.pix = QtGui.QPixmap.fromImage(self.img)
        self.video_frame.setPixmap(self.pix)
Example No. 5
def Predictor(image, network, class_names, class_colors, confidence_thr=0.4):
    """
    Input:
         image: input 
         network: yolov4 model,the output of load_network
         class_names: the name of class,the output of load_network
         class_color: box color,the output of load_network
         confidence_thr: confidence ,default 0.4
    Output:
         return : predictor
    """
    width = image.shape[1]
    height = image.shape[0]
    darknet_image = darknet.make_image(width, height, 3)

    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_resized = cv2.resize(image_rgb, (width, height),
                               interpolation=cv2.INTER_LINEAR)

    darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())
    predictor = darknet.detect_image(network,
                                     class_names,
                                     darknet_image,
                                     thresh=confidence_thr)  # use the parameter instead of a hard-coded 0.4
    darknet.free_image(darknet_image)

    return predictor, image, image_resized
Example No. 6
    def detect(self, image):
        # Create an image we reuse for each detect
        darknet_image = darknet.make_image(
            darknet.network_width(YoloNet.netMain),
            darknet.network_height(YoloNet.netMain), 3)
        frame_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(
            frame_rgb,
            (
                darknet.network_width(self.netMain),  # 416
                darknet.network_height(self.netMain)),  # 416
            interpolation=cv2.INTER_LINEAR)
        darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())

        detections = darknet.detect_image(self.netMain,
                                          self.metaMain,
                                          darknet_image,
                                          thresh=0.80)  # recognition threshold (was thresh=0.50)
        # detections=  (b'bird', 0.9903159141540527, (172.501708984375, 188.70071411132812, 56.11371994018555, 177.9843292236328))
        # nameTag, dets[j].prob[i], (b.x, b.y, b.w, b.h)

        boxed_image, total_info = self.cvDrawBoxes(
            detections, frame_resized)  # total_info: [name, prob, [top-left, bottom-right]]

        # print('total_info=', total_info)
        boxed_image = cv2.cvtColor(boxed_image, cv2.COLOR_BGR2RGB)
        # cv2.imshow('Demo', boxed_image)
        # cv2.waitKey(1)
        return total_info, boxed_image
Example No. 7
    def run(self):
        print("\nDetectImage线程执行\n")
        import darknet
        config_file = self.projectPath + "yolo-obj.cfg"
        data_file = self.projectPath + "obj.data"
        weights = self.projectPath + 'backup/' + self.weight

        print(config_file)
        print(data_file)
        print(weights)
        print(self.imageName)

        print("\nnet装载\n")

        self.DetectImageSignal.emit(0, [0], {"0": 0})
        network, class_names, class_colors = darknet.load_network(
            config_file, data_file, weights)  # 获取到net

        self.DetectImageSignal.emit(1, [0], {"0": 0})

        image = darknet.load_image(self.imageName.encode("utf-8"), 0, 0)

        self.DetectImageSignal.emit(2, [0], {"0": 0})
        detections = darknet.detect_image(network, class_names, image)
        # del darknet

        self.DetectImageSignal.emit(3, detections, class_colors)

        print("\nDetectImage线程执行完毕\n")
Example No. 8
def detection(camera_id):
    cap = cv2.VideoCapture(source_list[camera_id])
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb, (width, height),
                                   interpolation=cv2.INTER_LINEAR)
        frame_queue.put(frame_resized)
        img_for_detect = darknet.make_image(width, height, 3)
        darknet.copy_image_from_bytes(img_for_detect, frame_resized.tobytes())
        darknet_image_queue.put(img_for_detect)
        darknet_image = darknet_image_queue.get()
        prev_time = time.time()
        detections = darknet.detect_image(network, class_names, darknet_image, 0.5)
        detections_queue.put(detections)
        fps = int(1 / (time.time() - prev_time))
        fps_queue.put(fps)
        #print("FPS: {}".format(fps))
        darknet.free_image(darknet_image)
        random.seed(3)  # deterministic bbox colors
        frame_resized = frame_queue.get()
        detections = detections_queue.get()
        fps = fps_queue.get()
        last_frame_list.append(get_frame(frame_resized, detections))
        last_detection_list.append(detections)
        if len(last_frame_list) == 2:
            last_frame_list.pop(0)
        if len(last_detection_list) == 2:
            last_detection_list.pop(0)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + get_frame(frame_resized,
                                                               detections) + b'\r\n')  # concat frame one by one and show result
Example No. 9
    def inference(
            self,
            frame_rgb):  # function for inference, i.e. returning the bounding boxes
        self.color_code = None
        self.frame_rgb = frame_rgb
        print("self.frame_rgb.shape:", self.frame_rgb.shape)
        # self.frame_resized = cv2.resize(frame_rgb,
        #                            (darknet.network_width(self.netMain),
        #                             darknet.network_height(self.netMain)),
        #                            interpolation=cv2.INTER_LINEAR)

        darknet.copy_image_from_bytes(self.darknet_image,
                                      self.frame_rgb.tobytes())

        self.detections = darknet.detect_image(self.netMain,
                                               self.metaMain,
                                               self.darknet_image,
                                               thresh=0.25)
        print("self.detections:", self.detections)

        self.select_roi()  #Function to crop the image to BB dimensions

        #Debug Block - For counting the number of detections
        #---------------------------------------------------------------------#
        if (len(self.detections) == 0):
            global not_detected
            not_detected = not_detected + 1
        else:
            global detected
            detected = detected + 1

        image = self.cvDrawBoxes(self.frame_rgb)
        cv2.imshow('Demo', image)
        cv2.waitKey(1)
Example No. 10
def sign_detect(image, darknet_image):
    thresh = 0.25
    frame_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    ## resize to the network input size (e.g. 416 x 416)
    frame_resized = cv2.resize(frame_rgb, (darknet.network_width(
        cf.netMain), darknet.network_height(cf.netMain)),
                               interpolation=cv2.INTER_LINEAR)
    darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())
    ## name tag, confidence, box
    detections = darknet.detect_image(cf.netMain,
                                      cf.metaMain,
                                      darknet_image,
                                      thresh=thresh)
    # image,box_count = cvDrawBoxes(detections, frame_resized)
    boxes = []
    print('detections', detections)
    for detection in detections:
        # if "person" in detection[0].decode():
        x, y, w, h = detection[2][0],\
            detection[2][1],\
            detection[2][2],\
            detection[2][3]
        xmin, ymin, xmax, ymax = convertBack(float(x), float(y), float(w),
                                             float(h))
        boxes.append((int(xmin), int(ymin), int(xmax), int(ymax)))
        print('boxes', boxes)
    return boxes
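Several of these examples call a convertBack helper that is not shown; a typical definition, consistent with how its return values are unpacked above (centre-format box to integer corner coordinates):
def convertBack(x, y, w, h):
    # Convert a centre-format box (x, y, w, h) to corner coordinates.
    xmin = int(round(x - (w / 2)))
    xmax = int(round(x + (w / 2)))
    ymin = int(round(y - (h / 2)))
    ymax = int(round(y + (h / 2)))
    return xmin, ymin, xmax, ymax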
Example No. 11
 def readFrame(self, frame_read, timestamp):
     frame_rgb = cv2.cvtColor(frame_read, cv2.COLOR_BGR2RGB)
     frame_resized = cv2.resize(frame_rgb,
                                (self.frame_width, self.frame_height),
                                interpolation=(cv2.INTER_LINEAR))
     darknet.copy_image_from_bytes(self.darknet_image,
                                   frame_resized.tobytes())
     detections = darknet.detect_image(self.network,
                                       self.class_names,
                                       self.darknet_image,
                                       thresh=0.25)
     image = self.cvDrawBoxes(detections, frame_resized, timestamp)
     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
     self.prev_time = timestamp
     if self.showImage:
         cv2.imshow('Demo', image)
         if cv2.waitKey(1) == 27:
             exit(0)
     if self.write:
         self.out.write(image)
     result = []
     if self.plot:
         for track in self.trackedList:
             if track.lastHistory >= 5:
                 result.append([
                     track.id, track.x, track.y, track.crossSection,
                     track.velocity
                 ])
     return image, timestamp, result
Example No. 12
    def get_corr(self):
        network, class_names, class_colors = darknet.load_network(
            self.config_file, self.data_file, self.weight_file, batch_size=1)
        width = darknet.network_width(network)
        height = darknet.network_height(network)
        darknet_image = darknet.make_image(width, height, 3)

        frame = cv2.imread(self.image_path)
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb, (width, height),
                                   interpolation=cv2.INTER_LINEAR)
        darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())

        detections = darknet.detect_image(network,
                                          class_names,
                                          darknet_image,
                                          thresh=self.thresh_hold,
                                          hier_thresh=.5,
                                          nms=.45)

        voc = []
        for label, prob, corr in detections:
            i = self.bbox2points(corr)
            voc.append(i)
        return voc
Example No. 13
    def get_multi_corr(self, image_folder):
        network, class_names, class_colors = darknet.load_network(
            self.config_file, self.data_file, self.weight_file, batch_size=1)
        width = darknet.network_width(network)
        height = darknet.network_height(network)
        darknet_image = darknet.make_image(width, height, 3)

        infer_images = glob(os.path.join(image_folder, '*.jpg'))
        #print(infer_images)
        voc_rst = []
        for img in infer_images:
            frame = cv2.imread(img)
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame_resized = cv2.resize(frame_rgb, (width, height),
                                       interpolation=cv2.INTER_LINEAR)
            darknet.copy_image_from_bytes(darknet_image,
                                          frame_resized.tobytes())

            detections = darknet.detect_image(network,
                                              class_names,
                                              darknet_image,
                                              thresh=self.thresh_hold,
                                              hier_thresh=.5,
                                              nms=.45)

            voc = []
            for label, prob, corr in detections:
                i = self.bbox2points(corr)
                voc.append(i)

            voc_rst.append({'image_name': img, 'corr': voc})
        return voc_rst
Example No. 14
def image_detection(image_path, network, class_names, class_colors, thresh):
    # Darknet doesn't accept numpy images.
    # Create a Darknet image buffer that we reuse for each detection
    width = darknet.network_width(network)
    height = darknet.network_height(network)
    darknet_image = darknet.make_image(width, height, 3)

    image = cv2.imread(image_path)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_resized = cv2.resize(image_rgb, (width, height),
                               interpolation=cv2.INTER_LINEAR)

    darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())
    detections = darknet.detect_image(network,
                                      class_names,
                                      darknet_image,
                                      thresh=thresh)
    new_dets = []
    # detect_image returns centre-format boxes (x, y, w, h); rescale them to the original resolution
    for det in detections:
        new_x1 = det[2][0] * image.shape[1] / width
        new_y1 = det[2][1] * image.shape[0] / height
        new_x2 = det[2][2] * image.shape[1] / width
        new_y2 = det[2][3] * image.shape[0] / height
        new_det = (new_x1, new_y1, new_x2, new_y2)
        new_dets.append((det[0], det[1], new_det))
    darknet.free_image(darknet_image)

    image = darknet.draw_boxes(new_dets, image, class_colors)
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB), new_dets
Example No. 15
def YOLO_detection(image, darknet_image):
    frame_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    timer.start()
    darknet.copy_image_from_bytes(darknet_image, frame_rgb.tobytes())
    detections = darknet.detect_image(netMain,
                                      metaMain,
                                      darknet_image,
                                      thresh=0.25)
    z_box_list = []
    object_type_list = []
    labels = 0
    for i, detection in enumerate(detections):
        object_type = detection[0].decode()
        confidence = detection[1]
        labels += 1
        x, y, w, h = detection[2][0], \
                     detection[2][1], \
                     detection[2][2], \
                     detection[2][3]
        left, top, rights, bottom = convertBack(float(x), float(y), float(w),
                                                float(h))
        box = np.array([top, left, bottom, rights])
        z_box_list.append(box)
        type_and_confidence = [object_type, confidence]
        object_type_list.append(type_and_confidence)
    interval = timer.end()
    print('image1 Time: {:.6f}s, Detect Objects: {:d}.'.format(
        interval, labels))
    return z_box_list, labels, object_type_list
Example No. 16
def yolo_detect(frame_read, center_box, yolo_fail):
    # note: darknet_image, netMain, metaMain, width, height, iou and bbox_list are module-level globals here
    frame_rgb = cv2.cvtColor(frame_read, cv2.COLOR_BGR2RGB)
    frame_resized = cv2.resize(
        frame_rgb,
        (darknet.network_width(netMain), darknet.network_height(netMain)),
        interpolation=cv2.INTER_LINEAR)
    darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())
    detections = darknet.detect_image(netMain,
                                      metaMain,
                                      darknet_image,
                                      thresh=0.30)

    for detection in detections:
        #if detection[0] == b'car' or detection[0] == b'truck' or detection[0] == b'bus':
        x, y, w, h = detection[2][0] * width / darknet.network_width(netMain),\
            detection[2][1] * height / darknet.network_height(netMain),\
            detection[2][2] * width / darknet.network_width(netMain),\
            detection[2][3] * height / darknet.network_height(netMain)
        xmin, ymin, xmax, ymax = convertBack(float(x), float(y), float(w),
                                             float(h))

        yolo_bbox = [xmin, ymin, xmax, ymax]
        bbox_iou = bb_intersection_over_union(center_box, yolo_bbox)
        if bbox_iou > 0:
            iou.append(bbox_iou)
            bbox_list.append(yolo_bbox)

    if len(bbox_list) != 0:
        #cur_bbox=bbox_list[iou.index(max(iou))]
        yolo_fail = False
        return bbox_list[iou.index(max(iou))], yolo_fail
    else:
        yolo_fail = True
        return [0, 0, 0, 0], yolo_fail
Example No. 17
    def detect(self, frame):
        darknet_image = dn.make_image(self.width, self.height, 3)
        img_resized = cv2.resize(frame, (self.width, self.height),
                                 interpolation=cv2.INTER_LINEAR)

        # get image ratios to convert bounding boxes to proper size
        img_height, img_width, _ = frame.shape
        width_ratio = img_width / self.width
        height_ratio = img_height / self.height

        # run model on darknet style image to get detections
        dn.copy_image_from_bytes(darknet_image, img_resized.tobytes())
        detections = dn.detect_image(self.network, self.class_names,
                                     darknet_image)
        dn.free_image(darknet_image)

        results = []
        for label, confidence, bbox in detections:
            if float(confidence) <= 98.0:
                continue

            left, top, right, bottom = dn.bbox2points(bbox)
            left, top, right, bottom = int(left * width_ratio), int(top * height_ratio), \
                                       int(right * width_ratio), int(bottom * height_ratio)

            results.append((confidence, (left, top, right, bottom)))

        if len(results) > 0:
            # pick the highest-confidence box (compare numerically, since confidence may be a string)
            return max(results, key=lambda r: float(r[0]))[1]
        else:
            return None
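A short usage sketch for the detect method above (detector is assumed to be an instance of the surrounding class with its network already loaded):
frame = cv2.imread("test.jpg")  # hypothetical input image
box = detector.detect(frame)
if box is not None:
    left, top, right, bottom = box
    cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
    cv2.imwrite("detected.jpg", frame)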
Example No. 18
    def predict(self, img_array, thresh=0.5, hier_thresh=0.5, nms=0.45):
        """
        Observations: Assert that the image have the correct shapes.
        """
        dn.copy_image_from_bytes(self._darknet_image, img_array.tobytes())

        detections = dn.detect_image(self._net, self._metadata,
                                     self._darknet_image, thresh, hier_thresh,
                                     nms)

        predictions = []
        for detection in detections:
            x, y, w, h = detection[2]
            xmin, ymin, xmax, ymax = convertBack(x, y, w, h)
            if xmax - xmin > 0.9 or ymax - ymin > 0.9:
                continue
            name = str(detection[0], encoding="ascii")
            if name != "person":
                continue
            predictions.append({
                "name": name,
                "confidence": detection[1],
                "box": (xmin, ymin, xmax, ymax),
            })

        return predictions
Example No. 19
def detect_box(color_image):
    prev_time = time.time()
    #color_image=cv2.imread("./yolo.jpg")
    #ret, frame_read = cap.read()
    """frames = pipeline.wait_for_frames()
    color_frame = frames.get_color_frame()
    #if  not color_frame:
        #continue

    color_image = np.asanyarray(color_frame.get_data())
    img = color_image
    h, w, ch = color_image.shape
    bytesPerLine = ch * w 
    cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR, color_image)"""

    frame_rgb = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
    frame_resized = cv2.resize(
        frame_rgb,
        (darknet.network_width(netMain), darknet.network_height(netMain)),
        interpolation=cv2.INTER_LINEAR)

    darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())
    print("start detect")
    # this call returns the detected content; see the README for the data format
    detections = darknet.detect_image(netMain,
                                      metaMain,
                                      darknet_image,
                                      thresh=0.25)
    print("finsh detect")
    print(detections)
    return detections

    # NOTE: everything below is unreachable because of the early return above
    for detection in detections:
        x, y, w, h = detection[2][0],\
            detection[2][1],\
            detection[2][2],\
            detection[2][3]
        xmin, ymin, xmax, ymax = convertBack(float(x), float(y), float(w),
                                             float(h))

        resizeImg = [
            color_image.shape[0] / darknet.network_width(netMain),
            color_image.shape[1] / darknet.network_height(netMain)
        ]
        print("[" + str(color_image.shape[0]) + ", " +
              str(color_image.shape[1]) + "]")
        print("[" + str(darknet.network_width(netMain)) + ", " +
              str(darknet.network_height(netMain)) + "]")
        print(str(xmin) + "," + str(ymin) + "," + str(xmax) + "," + str(ymax))
        print("-----------------------------------------")
        print(
            str(xmin * resizeImg[0]) + "," + str(ymin * resizeImg[1]) + "," +
            str(xmax * resizeImg[0]) + "," + str(ymax * resizeImg[1]))

    image = cvDrawBoxes(detections, color_image)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    print(time.time() - prev_time)
    while (True):
        cv2.imshow('Demo', image)
        cv2.waitKey(3)
Example No. 20
def detect():
    '''
    Returns the detections made by darknet and a ros detection message type
    '''

    darknet.copy_image_from_bytes(_darknet_image, _resized_frame.tobytes())

    detections = darknet.detect_image(_netMain,
                                      _metaMain,
                                      _darknet_image,
                                      thresh=0.25)

    detection_array = DetectionArray()

    for d in detections:
        box = Detection()

        box.label = d[0].decode()

        box.x = d[2][0] / darknet.network_width(_netMain)
        box.y = d[2][1] / darknet.network_height(_netMain)
        box.w = d[2][2] / darknet.network_width(_netMain)
        box.h = d[2][3] / darknet.network_height(_netMain)

        detection_array.boxes.append(box)

    return detections, detection_array
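Because the boxes above are normalised by the network input size, a consumer of the DetectionArray message can map them back to pixel coordinates for any frame size. A sketch (Detection/DetectionArray are the custom ROS messages assumed above):
def to_pixel_boxes(detection_array, frame_w, frame_h):
    # Convert normalised centre-format boxes back to corner pixels for a frame of known size.
    boxes = []
    for box in detection_array.boxes:
        cx, cy = box.x * frame_w, box.y * frame_h
        w, h = box.w * frame_w, box.h * frame_h
        boxes.append((box.label,
                      int(cx - w / 2), int(cy - h / 2),
                      int(cx + w / 2), int(cy + h / 2)))
    return boxes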
Example No. 21
    def callback(self, msg):
        begin = time.perf_counter()  # time.clock() was removed in Python 3.8
        frame = self.bridge.imgmsg_to_cv2(msg, "bgr8")
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        #cv2.imshow("frame_rgb", frame_rgb)
        frame_resized = cv2.resize(frame_rgb, (self.width, self.height),
                                   interpolation=cv2.INTER_LINEAR)
        #cv2.imshow("frame_resized", frame_resized)
        img_for_detect = darknet.make_image(self.width, self.height, 3)
        darknet.copy_image_from_bytes(img_for_detect, frame_resized.tobytes())
        #print(img_for_detect.__dict__)
        detections = darknet.detect_image(self.network,
                                          self.class_names,
                                          img_for_detect,
                                          thresh=self.thresh)
        #print(detections)

        image = darknet.draw_boxes(detections, frame_resized,
                                   self.class_colors)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        end = time.perf_counter()
        print(end - begin)

        cv2.imshow('Inference', image)
        cv2.imshow("Image Window", frame)
        cv2.waitKey(3)
Example No. 22
def image_detection(image_path, network, class_names, class_colors, thresh):
    # Darknet doesn't accept numpy images.
    # Create a Darknet image buffer that we reuse for each detection
    width = darknet.network_width(network)
    height = darknet.network_height(network)
    darknet_image = darknet.make_image(width, height, 3)

    image = cv2.imread(image_path)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_resized = cv2.resize(image_rgb, (width, height),
                               interpolation=cv2.INTER_LINEAR)

    darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())
    detections = darknet.detect_image(network, class_names, darknet_image, thresh=thresh)
    image = darknet.draw_boxes(detections, image_resized, class_colors)
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB), detections
    if class_names == "fresh":
        fresh = 0
    if class_names == "ripe":
        ripe = 0
    if class_names == "raw":
        raw = 0
    if class_names == "flowering":
        flowering = 0
    if class_names == "alternaria":
        alternaria = 0
    if class_names == "cedar":
        cedar = 0
    if class_names == "fire-blight":
        fire_blight = 0
    if class_names == "leaf-roller":
        leaf_roller = 0
    if class_names == "fungal":
        fungal = 0
Example No. 23
    def work(self):
        frame = self.read()

        fps_str = "FPS : %0.1f" % self.fps
        cv2.putText(frame, fps_str, (0, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # if the incoming frame is empty, the loop ends
        #if not grabbed:
        #if np.shape(frame) == ():
        #    break

        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb,
                                   (darknet.network_width(vs.netMain), darknet.network_height(vs.netMain)),
                                   interpolation=cv2.INTER_LINEAR)

        imageToShowingCorped = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2RGB)

        darknet.copy_image_from_bytes(vs.darknet_image, frame_resized.tobytes())

        detections = darknet.detect_image(vs.netMain, vs.metaMain, vs.darknet_image, thresh=0.25)

        #print(vs.darknet_image)

        #nparr = np.fromstring(img_str, np.uint8)
        #img1 = cv2.imdecode(nparr, cv2.CV_LOAD_IMAGE_COLOR) # cv2.IMREAD_COLOR in OpenCV 3.1

        image = vs.cvDrawBoxes(detections, frame_resized)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # pass the callable and its args separately; the original called StuffForCroped immediately
        tforstuff = Thread(target=self.StuffForCroped, args=(detections, imageToShowingCorped))
        tforstuff.start()

        cv2.imshow('Showing I4U Video With Thread', image)
Example No. 24
def guardarpkl(inputvideo):
    import pickle
    path_data = os.getcwd()
    carpeta = 'cooke_1211'
    config_file = join(path_data, glob.glob(join(path_data, carpeta + '/*.cfg'))[0])
    data_file = join(path_data, glob.glob(join(path_data, carpeta + '/*.data'))[0])
    weights = join(path_data, glob.glob(join(path_data, carpeta + '/*.weights'))[0])
    network, class_names, class_colors = darknet.load_network(config_file, data_file, weights, batch_size=1)
    width = darknet.network_width(network)
    height = darknet.network_height(network)
    darknet_image = darknet.make_image(width, height, 3)
    for s in inputvideo:
        input_source = join(path_data, s)
        if isfile(input_source):
            print("generando pkl para..."+input_source)
            cap = cv2.VideoCapture(input_source)
            detecciones = [] #[[width, height, config_file, data_file, weights]]
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                image = cv2.resize(frame_rgb, (width, height), interpolation=cv2.INTER_LINEAR)  # resize to the network input size instead of hard-coded 416x416
                darknet.copy_image_from_bytes(darknet_image, image.tobytes())
                detections = darknet.detect_image(network, class_names, darknet_image,thresh=0.1)
                detecciones.append(detections)
            pickle.dump(detecciones, open(splitext(input_source)[0] + '.pkl', 'wb'))
            print("pkl generado...")
        else:
            print("error: archivo no existe")
Example No. 25
def image_detection_original_no_image(image, network, class_names, thresh, fx,
                                      fy):
    global width, height
    darknet_image = darknet.make_image(width, height, 3)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_resized = cv2.resize(image_rgb, (width, height),
                               interpolation=cv2.INTER_LINEAR)
    darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())
    detections = darknet.detect_image(network,
                                      class_names,
                                      darknet_image,
                                      thresh=thresh)
    for i in range(len(detections)):
        detections[i] = list(detections[i])
        detections[i][2] = list(detections[i][2])
        detections[i][2][0] = (width / 2 / fx) - (1 / fx) * (width / 2 - detections[i][2][0]) \
            if detections[i][2][0] <= width / 2 \
            else (1 / fx) * (detections[i][2][0] - width / 2) + (width / 2 / fx)
        detections[i][2][1] = (height / 2 / fy) - (1 / fy) * (height / 2 - detections[i][2][1]) \
            if detections[i][2][1] <= height / 2 \
            else (1 / fy) * (detections[i][2][1] - height / 2) + (height / 2 / fy)
        detections[i][2][2] /= fx
        detections[i][2][3] /= fy
    darknet.free_image(darknet_image)
    return detections
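Both branches of each conditional above simplify algebraically to a plain division by the zoom factor, since width/(2*fx) - (width/2 - x)/fx == x/fx == (x - width/2)/fx + width/(2*fx). An equivalent, more compact sketch of the same remapping:
for i in range(len(detections)):
    label, confidence, (x, y, w, h) = detections[i]
    # Same result as the two-branch remapping above: scale everything by 1/fx, 1/fy.
    detections[i] = [label, confidence, [x / fx, y / fy, w / fx, h / fy]]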
Example No. 26
 def detect(self):
     while True:
         self.frame_count = self.frame_count + 1
         detections = None
         start = time.time()
         frame_rgb = cv2.cvtColor(self.frame_blend, cv2.COLOR_BGR2RGB)
         frame_resized = cv2.resize(frame_rgb, (darknet.network_width(
             self.net_main), darknet.network_height(self.net_main)),
                                    interpolation=cv2.INTER_LINEAR)
         if self.frame_count % self.step_frame == 0:
             darknet.copy_image_from_bytes(self.darknet_image,
                                           frame_resized.tobytes())
             ## name tag, confidence, box
             detections = darknet.detect_image(self.net_main,
                                               self.meta_main,
                                               self.darknet_image,
                                               thresh=self.thresh)
             self.frame_count = 0
         end = time.time()
         fps_cur = 1 / (end - start)
         self.fps_detect = fps_cur if self.fps_detect == 0 else (
             self.fps_detect * 0.95 + fps_cur * 0.05)
         image, box_count = cv_draw_boxes(detections, frame_resized)
         self.number_boxes = self.number_boxes + box_count
         fps_str = "{:.2f}".format(self.fps_detect)
         cv2.putText(image, fps_str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                     (0, 255, 0), 2, cv2.LINE_AA)
         self.frame_detect = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
Example No. 27
    def image_callback(self, msg):
        # np.fromstring is deprecated for binary data; use np.frombuffer instead
        self.image = np.frombuffer(msg.data,
                                   dtype=np.uint8).reshape(320, 480, 3)  # avm
        # self.image = np.frombuffer(msg.data, dtype=np.uint8).reshape(480, 640, 3)  # front
        prev_time = time.time()

        #========================================================================================================
        frame_resized = cv2.resize(self.image, (darknet.network_width(
            self.netMain), darknet.network_height(self.netMain)),
                                   interpolation=cv2.INTER_LINEAR)

        darknet.copy_image_from_bytes(self.darknet_image,
                                      frame_resized.tobytes())
        detections = darknet.detect_image(self.netMain,
                                          self.metaMain,
                                          self.darknet_image,
                                          thresh=0.25)

        cands = PoseArray()  # [x, y, heading, velocity]
        for detection in detections:
            temp = Pose()
            temp.position.x = detection[2][0]
            temp.position.y = detection[2][1]
            temp.position.z = detection[1]
            cands.poses.append(temp)
        self.parking_cand_pub.publish(cands)
        # #========================================================================================================
        # print("frame: ", 1/(time.time()-prev_time))

        image = cvDrawBoxes(detections, frame_resized)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        cv2.imshow('Demo', image)
        cv2.waitKey(3)
Example No. 28
def LP_detection(image, network, class_names, class_colors, thresh):
    # Darknet doesn't accept numpy images.
    # Create a Darknet image buffer that we reuse for each detection

    origin_height, origin_width, _ = image.shape

    width = darknet.network_width(network)
    height = darknet.network_height(network)
    darknet_image = darknet.make_image(width, height, 3)

    # image = cv2.imread(image_path)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_resized = cv2.resize(image_rgb, (width, height),
                               interpolation=cv2.INTER_LINEAR)

    darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())
    detections = darknet.detect_image(network,
                                      class_names,
                                      darknet_image,
                                      thresh=thresh)

    box = []
    for label, confidence, bbox in detections:
        left, top, right, bottom = bbox2points(bbox)
        box.append(int(left * (origin_width / width)))
        box.append(int(top * (origin_height / height)))
        box.append(int(right * (origin_width / width)))
        box.append(int(bottom * (origin_height / height)))

    return image, box
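A brief usage sketch: the returned box list is flat (left, top, right, bottom per plate, already scaled back to the original image), so the first detected plate can be cropped like this (frame is a hypothetical BGR input image):
image, box = LP_detection(frame, network, class_names, class_colors, thresh=0.25)
if box:
    left, top, right, bottom = box[:4]
    plate_crop = frame[top:bottom, left:right]
    cv2.imwrite("plate.jpg", plate_crop)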
Example No. 29
def YOLO(image_list):

    global altNames
    configPath = "./cfg/yolov4.cfg"
    weightPath = "./yolov4.weights"
    metaPath = "./cfg/coco.data"
    if not os.path.exists(configPath):
        raise ValueError("Invalid config path `" +
                         os.path.abspath(configPath)+"`")
    if not os.path.exists(weightPath):
        raise ValueError("Invalid weight path `" +
                         os.path.abspath(weightPath)+"`")
    if not os.path.exists(metaPath):
        raise ValueError("Invalid data file path `" +
                         os.path.abspath(metaPath)+"`")
    network,class_names,class_colors=darknet.load_network(configPath,metaPath,weightPath,batch_size=1)
    if altNames is None:
        try:
            with open(metaPath) as metaFH:
                metaContents = metaFH.read()
                import re
                match = re.search("names *= *(.*)$", metaContents,
                                  re.IGNORECASE | re.MULTILINE)
                if match:
                    result = match.group(1)
                else:
                    result = None
                try:
                    if os.path.exists(result):
                        with open(result) as namesFH:
                            namesList = namesFH.read().strip().split("\n")
                            altNames = [x.strip() for x in namesList]
                except TypeError:
                    pass
        except Exception:
            pass
    i = 0
    while i < len(image_list):  # iterate over the provided images instead of looping forever
        image = cv2.imread(image_list[i])
        width = image.shape[1]
        height = image.shape[0]

        # Create an image we reuse for each detect
        darknet_image = darknet.make_image(width, height, 3)

        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image_rgb = cv2.resize(image_rgb, (width, height),
                               interpolation=cv2.INTER_LINEAR)

        darknet.copy_image_from_bytes(darknet_image, image_rgb.tobytes())

        detections = darknet.detect_image(network, class_names, darknet_image, thresh=0.25)
        image = cvDrawBoxes(detections, image_rgb)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        cv2.imshow('Output', image)
        cv2.waitKey(0)
        i += 1
    cv2.destroyAllWindows()
Example No. 30
    def drone_detect(self, frame):
        prev_time = time.time()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        darknet.copy_image_from_bytes(self.darknet_image,frame_rgb.tobytes())
        detections = darknet.detect_image(self.netMain, self.metaMain, self.darknet_image, thresh=0.25)
        print((time.time()-prev_time))

        return detections
Example No. 31
def YOLO():

    global metaMain, netMain, altNames
    configPath = "./cfg/yolov3.cfg"
    weightPath = "./yolov3.weights"
    metaPath = "./cfg/coco.data"
    if not os.path.exists(configPath):
        raise ValueError("Invalid config path `" +
                         os.path.abspath(configPath)+"`")
    if not os.path.exists(weightPath):
        raise ValueError("Invalid weight path `" +
                         os.path.abspath(weightPath)+"`")
    if not os.path.exists(metaPath):
        raise ValueError("Invalid data file path `" +
                         os.path.abspath(metaPath)+"`")
    if netMain is None:
        netMain = darknet.load_net_custom(configPath.encode(
            "ascii"), weightPath.encode("ascii"), 0, 1)  # batch size = 1
    if metaMain is None:
        metaMain = darknet.load_meta(metaPath.encode("ascii"))
    if altNames is None:
        try:
            with open(metaPath) as metaFH:
                metaContents = metaFH.read()
                import re
                match = re.search("names *= *(.*)$", metaContents,
                                  re.IGNORECASE | re.MULTILINE)
                if match:
                    result = match.group(1)
                else:
                    result = None
                try:
                    if os.path.exists(result):
                        with open(result) as namesFH:
                            namesList = namesFH.read().strip().split("\n")
                            altNames = [x.strip() for x in namesList]
                except TypeError:
                    pass
        except Exception:
            pass
    #cap = cv2.VideoCapture(0)
    cap = cv2.VideoCapture("test.mp4")
    cap.set(3, 1280)
    cap.set(4, 720)
    out = cv2.VideoWriter(
        "output.avi", cv2.VideoWriter_fourcc(*"MJPG"), 10.0,
        (darknet.network_width(netMain), darknet.network_height(netMain)))
    print("Starting the YOLO loop...")

    # Create an image we reuse for each detect
    darknet_image = darknet.make_image(darknet.network_width(netMain),
                                       darknet.network_height(netMain), 3)
    while True:
        prev_time = time.time()
        ret, frame_read = cap.read()
        if not ret:
            break  # end of the video stream
        frame_rgb = cv2.cvtColor(frame_read, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb,
                                   (darknet.network_width(netMain),
                                    darknet.network_height(netMain)),
                                   interpolation=cv2.INTER_LINEAR)

        darknet.copy_image_from_bytes(darknet_image,frame_resized.tobytes())

        detections = darknet.detect_image(netMain, metaMain, darknet_image, thresh=0.25)
        image = cvDrawBoxes(detections, frame_resized)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        print(1/(time.time()-prev_time))
        cv2.imshow('Demo', image)
        cv2.waitKey(3)
    cap.release()
    out.release()