# Example #1
def talker():
    """Publish dummy detection batches on /sig_info at 5 Hz.

    Sends exactly ten messages, each carrying three (identical) pubmsg
    entries, then stays alive idling in 20-second sleeps until shutdown.
    """
    rospy.init_node("sender", anonymous=True)
    publisher = rospy.Publisher("/sig_info", total, queue_size=10)
    loop_rate = rospy.Rate(5)
    sent_count = 0
    while not rospy.is_shutdown():
        if sent_count >= 10:
            # Quota reached: keep the node alive but quiet.
            time.sleep(20)
            continue
        detection = pubmsg()
        batch = total()
        detection.label = 11
        detection.confidence = 0.88
        detection.x = 11.256
        detection.y = 21.257
        detection.z = 31.259
        # Three references to the same detection, as in the original design.
        batch.result.extend([detection] * 3)
        rospy.loginfo(batch)
        rospy.loginfo(len(batch.result))
        publisher.publish(batch)
        loop_rate.sleep()
        sent_count += 1
# Example #2
def doit():
    """Stage-driven detection loop: publish detection batches on /total_info.

    Runs at 30 Hz until shutdown.  Stage 1 runs the detector on the shared
    RGB + depth buffers; stage 2 appends a fixed label-100 marker; stage 3
    lazily opens the downward camera (/dev/video20) and detects on its
    frames.  Any detector failure is replaced by a sentinel pubmsg with
    label 88 so a message is still published.

    Side effects: publishes on /total_info and clears the global image
    freshness flags after consuming a frame pair.
    """
    rate = rospy.Rate(30)
    global cv_image, color_img_update, depth_img, depth_img_update, stage, sig_info_update
    pub_total = rospy.Publisher("/total_info", total, queue_size=1)
    get_total_update = 0  # 1 when a fresh batch is ready to publish
    local_flag = 0        # 0 -> flush a few stale frames on next stage-3 read
    capture = []          # replaced by a cv2.VideoCapture once stage 3 opens it
    open_down_cam = 0     # 1 once the downward camera has been opened
    while not rospy.is_shutdown():
        get_total = total()
        total_dect = total()
        # The node is currently pinned to stage 1; other stage branches are
        # reachable only if `stage` is changed after this normalisation.
        if stage != 1:
            stage = 1
        if stage == 1 and color_img_update == 1 and depth_img_update == 1:
            if len(cv_image) != 0 and len(depth_img) != 0:
                try:
                    get_total = detect(total_dect, cv_image, depth_img)
                except Exception:
                    # Detector crashed: publish a label-88 sentinel instead.
                    info = pubmsg()
                    info.label = 88
                    info.confidence = 0
                    info.x = 0
                    info.y = 0
                    info.z = 0
                    get_total.result.append(info)
                    rospy.loginfo(
                        "stage--1   dddddddddddddddddddddddddddddddddddddddddddddd"
                    )
                get_total_update = 1
                color_img_update = 0
                depth_img_update = 0
                local_flag = 0
        if stage == 2:
            # Stage 2 only signals "label 100" with no geometry attached.
            info = pubmsg()
            info.label = 100
            info.confidence = 0
            info.x = 0
            info.y = 0
            info.z = 0
            get_total.result.append(info)
            get_total_update = 1
        if stage == 3:
            if open_down_cam == 0:
                rospy.loginfo("open camera")
                time.sleep(3)
                capture = cv2.VideoCapture("/dev/video20")
                open_down_cam = 1
                rospy.loginfo("camera opened success")
            if local_flag == 0:
                # First read after a stage switch: drop buffered stale frames.
                local_flag = 1
                for _ in range(5):
                    et, frame = capture.read()
            else:
                et, frame = capture.read()
            if et:
                if len(frame) != 0:
                    try:
                        get_total = detect(total_dect, frame)
                    except Exception:
                        # Detector crashed: publish a label-88 sentinel.
                        info = pubmsg()
                        info.label = 88
                        info.confidence = 0
                        info.x = 0
                        info.y = 0
                        info.z = 0
                        get_total.result.append(info)
                        rospy.loginfo(
                            "stage--3   dddddddddddddddddddddddddddddddddddddddddddddd"
                        )
                    del frame  # release the frame buffer promptly
                    get_total_update = 1
                    et = False
        if get_total_update == 1:
            get_total_update = 0
            pub_total.publish(get_total)
        rate.sleep()
# Example #3
def detect(current_total_dect, current_image, current_depth_img=()):
    """Run YOLO inference on *current_image*; append one pubmsg per box.

    Args:
        current_total_dect: message accumulator; detections are appended to
            its ``result`` list and the object itself is returned.
        current_image: BGR frame (HxWxC ndarray).
        current_depth_img: optional depth frame aligned with the colour
            image; when non-empty and stage == 1 it is used to compute a
            metric (x, y, z) for each box.  Default is an immutable empty
            tuple (never mutated here; replaces the old mutable ``[]``).

    Returns:
        *current_total_dect* with appended detections, or with a single
        label-88 sentinel entry if inference failed.

    Uses module globals: net, exec_net, input_blob, args, labels_map, the
    camera intrinsics fx/fy/cx/cy, stage, and time_stamp.
    """
    global fx, fy, cx, cy, stage, time_stamp
    (rows, cols, channels) = current_image.shape

    # Hue band 22..36 (the original tuning was 16..44); pixels outside the
    # band are zeroed before inference.
    lower_blue = np.array([22, 0, 0])
    upper_blue = np.array([36, 255, 255])

    getimg = np.zeros([rows, cols, channels], np.uint8)
    hsv = cv2.cvtColor(current_image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    getimg = cv2.add(getimg, current_image, mask=mask)

    # Resize the masked frame to the network input and change layout
    # from HWC to CHW, batched to (n, c, h, w).
    n, c, h, w = net.inputs[input_blob].shape
    cur_request_id = 0
    in_frame = cv2.resize(getimg, (w, h))
    in_frame = in_frame.transpose((2, 0, 1))
    in_frame = in_frame.reshape((n, c, h, w))

    try:
        exec_net.start_async(request_id=cur_request_id,
                             inputs={input_blob: in_frame})
    except Exception:
        # Inference submission failed: return a label-88 sentinel.
        info = pubmsg()
        info.label = 88
        info.confidence = 0
        info.x = 0
        info.y = 0
        info.z = 0
        current_total_dect.result.append(info)
        rospy.loginfo(
            "eddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
        return current_total_dect

    # Collecting object detection results.
    objects = list()
    if exec_net.requests[cur_request_id].wait(-1) == 0:
        try:
            output = exec_net.requests[cur_request_id].outputs
        except Exception:
            # Could not fetch outputs: return a label-88 sentinel.
            info = pubmsg()
            info.label = 88
            info.confidence = 0
            info.x = 0
            info.y = 0
            info.z = 0
            current_total_dect.result.append(info)
            rospy.loginfo(
                "eddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
            return current_total_dect

        for layer_name, out_blob in output.items():
            layer_params = YoloV3Params(net.layers[layer_name].params,
                                        out_blob.shape[2])
            log.info("Layer {} parameters: ".format(layer_name))
            layer_params.log_params()
            objects += parse_yolo_region(out_blob, in_frame.shape[2:],
                                         getimg.shape[:-1], layer_params,
                                         args.prob_threshold)

    # Greedy NMS: zero the confidence of lower-ranked boxes whose IoU with
    # a kept box exceeds --iou_threshold.
    objects = sorted(objects, key=lambda obj: obj['confidence'], reverse=True)
    for i in range(len(objects)):
        if objects[i]['confidence'] == 0:
            continue
        for j in range(i + 1, len(objects)):
            if intersection_over_union(objects[i],
                                       objects[j]) > args.iou_threshold:
                objects[j]['confidence'] = 0

    # Keep only boxes above the --prob_threshold CLI parameter.
    objects = [
        obj for obj in objects if obj['confidence'] >= args.prob_threshold
    ]

    if len(objects) and args.raw_output_message:
        log.info("\nDetected boxes for batch {}:".format(1))
        log.info(" Class ID | Confidence | XMIN | YMIN | XMAX | YMAX | COLOR ")

    origin_im_size = getimg.shape[:-1]

    for obj in objects:
        # Skip boxes that extend outside the image.
        if obj['xmax'] > origin_im_size[1] or obj['ymax'] > origin_im_size[
                0] or obj['xmin'] < 0 or obj['ymin'] < 0:
            continue
        color = (int(min(obj['class_id'] * 15,
                         255)), min(obj['class_id'] * 20,
                                    255), min(obj['class_id'] * 25 + 100, 255))
        # BUGFIX: was `len(labels_map) >= obj['class_id']`, which allowed
        # class_id == len(labels_map) and raised IndexError on the lookup;
        # a valid index requires strict `>`.
        det_label = labels_map[obj['class_id']] if labels_map and len(labels_map) > obj['class_id'] else \
            str(obj['class_id'])

        if args.raw_output_message:
            log.info("{:^9} | {:10f} | {:4} | {:4} | {:4} | {:4} | {} ".format(
                det_label, obj['confidence'], obj['xmin'], obj['ymin'],
                obj['xmax'], obj['ymax'], color))

        cv2.rectangle(getimg, (obj['xmin'], obj['ymin']),
                      (obj['xmax'], obj['ymax']), color, 2)
        cv2.putText(
            getimg, "#" + det_label + ' ' +
            str(round(obj['confidence'] * 100, 1)) + ' %',
            (obj['xmin'], obj['ymin'] - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6,
            color, 1)

        info = pubmsg()
        info.x = 0
        info.y = 0
        info.z = 0
        info.label = obj['class_id']
        info.confidence = round(obj['confidence'] * 100, 1)
        if stage == 1 and len(current_depth_img) != 0 and (
                obj['xmax'] - obj['xmin']) * (obj['ymax'] - obj['ymin']) >= 0:
            current_total_dect.header = time_stamp
            # Shrink the box by 25% per side so depth is averaged over the
            # central region only, avoiding background pixels at the edges.
            depth_box_width = obj['xmax'] - obj['xmin']
            depth_box_height = obj['ymax'] - obj['ymin']
            delta_rate = 0.25
            x_box_min = int(obj['xmin'] + depth_box_width * delta_rate)
            y_box_min = int(obj['ymin'] + depth_box_height * delta_rate)
            x_box_max = int(obj['xmax'] - depth_box_width * delta_rate)
            y_box_max = int(obj['ymax'] - depth_box_height * delta_rate)
            after_width = (depth_box_width * (1 - 2 * delta_rate))
            after_height = (depth_box_height * (1 - 2 * delta_rate))
            bb = current_depth_img[y_box_min:y_box_max, x_box_min:x_box_max]
            # Mean depth (mm -> m, hence * 0.001), then back-project the box
            # centre with the pinhole intrinsics fx/fy/cx/cy.
            z_pos = bb.sum() / (after_width * after_height) * 0.001
            x_pos = (0.5 * (x_box_min + x_box_max) - cx) * z_pos / fx
            y_pos = (0.5 * (y_box_min + y_box_max) - cy) * z_pos / fy
            cv2.rectangle(getimg, (x_box_min, y_box_min),
                          (x_box_max, y_box_max), (0, 255, 255), 2)
            info_pos = " z=" + str(z_pos)
            cv2.putText(getimg, info_pos, (x_box_min, y_box_min),
                        cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 0), 1)
            info.x = x_pos
            info.y = y_pos
            info.z = z_pos
        elif stage != 1 and (obj['xmax'] - obj['xmin']) * (obj['ymax'] -
                                                           obj['ymin']) >= 0:
            # No depth available: report the pixel offset of the box centre
            # from the image centre instead of a metric position.
            locate_x = (obj['xmax'] + obj['xmin']) / 2
            locate_y = (obj['ymax'] + obj['ymin']) / 2
            h, w, c = getimg.shape
            info.x = locate_x - w / 2
            info.y = locate_y - h / 2
            info.z = 0
            cv2.putText(getimg,
                        (str(round(info.x)) + " " + str(round(info.y))),
                        (int(locate_x), int(locate_y)),
                        cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 150), 1)
        else:
            # Degenerate box: keep the entry but mark it as zero-confidence.
            info.confidence = 0
        current_total_dect.result.append(info)
    cv2.imshow("DetectionResults", getimg)
    cv2.waitKey(3)
    return current_total_dect