# === Example #1 (示例#1) ===
    def __getitem__(self, idx):
        """Return one training sample: the image at *idx* and its YOLO label grid.

        The label tensor has shape (S, S, B * 5 + 13): per cell, B boxes of
        (x, y, w, h, confidence) followed by a 13-way one-hot class vector.
        NOTE(review): the class count 13 is hard-coded; presumably it equals
        len(self.classes) -- confirm against the dataset configuration.
        """
        num_classes = 13  # assumed to equal len(self.classes) -- TODO confirm
        label = np.zeros((self.S, self.S, self.B * 5 + num_classes))

        xml_path = self.annotation_path[idx]
        img_path = self.images_path[idx]

        xml_data = parse_xml(xml_path)

        image, ratio_x, ratio_y = self._image_processing(img_path, xml_data)

        for ob in xml_data["object"]:
            # Cell indices plus cell-relative coordinates for this object.
            x, y, cell_x, cell_y, w, h = self._get_reat_axis(
                ob["bndbox"], ratio_x, ratio_y)

            # Each cell holds B bounding boxes; the k-means model picks the
            # box slot whose (w, h) prior best matches this object.
            b_id = self.kmeans.predict([[w, h]])[0]
            label[cell_x, cell_y,
                  b_id * 5:(b_id + 1) * 5] = np.array([x, y, w, h, 1])

            # One-hot class vector occupies the trailing slots of the cell.
            object_class = np.zeros(num_classes)
            object_class[self.classes.index(ob["name"])] = 1
            label[cell_x, cell_y, -num_classes:] = object_class

        sample = {"image": np.array(image), "label": label}

        if self.transform:
            sample = self.transform(sample)

        return sample
def calculate_anchor(input_size, train_annot_folder, saved_kmean_name):
    """Cluster ground-truth box sizes into 5 anchors and pickle the model.

    Walks every annotation file in *train_annot_folder*, collects each
    object's (w, h) scaled to *input_size*, fits a 5-cluster KMeans on the
    pairs, and dumps the fitted estimator to *saved_kmean_name*.
    """
    annot_path_list = sorted(
        join(train_annot_folder, f) for f in listdir(train_annot_folder)
        if isfile(join(train_annot_folder, f))
    )

    kmean_list = []
    for annot_name in annot_path_list:
        xml_data = parse_xml(annot_name)

        ratio_x, ratio_y = get_ratio(input_size, xml_data)
        for ob in xml_data["object"]:
            w, h = get_wh(input_size, ob["bndbox"], ratio_x, ratio_y)
            kmean_list.append([w, h])

    kmeans = KMeans(n_clusters=5, random_state=0).fit(np.asarray(kmean_list))
    # Close the output file deterministically; the original passed an
    # anonymous open() handle to pickle.dump and leaked it.
    with open(saved_kmean_name, 'wb') as f:
        pickle.dump(kmeans, f)
def parse_annotation_jpeg(annotation_path, jpeg_path, gs):
    """Collect positive/negative region proposals for one image.

    (Boxes flagged ``difficult`` are assumed to be dropped by ``parse_xml``.)

    Positive samples: proposals with IoU >= 0.5 against the ground truth.
    Negative samples: 0 < IoU < 0.5, and -- to bound their number -- the
    proposal must be larger than 1/5 of the largest ground-truth box.

    Returns (positive_list, negative_list) of rects.
    """
    img = cv2.imread(jpeg_path)

    selectivesearch.config(gs, img, strategy='q')
    # Candidate region proposals.
    rects = selectivesearch.get_rects(gs)
    # Ground-truth bounding boxes.
    bndboxs = parse_xml(annotation_path)

    # Area of the largest ground-truth box, used to filter tiny negatives.
    maximum_bndbox_size = 0
    for xmin, ymin, xmax, ymax in bndboxs:
        maximum_bndbox_size = max(maximum_bndbox_size,
                                  (ymax - ymin) * (xmax - xmin))

    # IoU of every proposal against the ground-truth boxes.
    iou_list = compute_ious(rects, bndboxs)

    positive_list = list()
    negative_list = list()
    for rect, iou_score in zip(rects, iou_list):
        xmin, ymin, xmax, ymax = rect
        rect_size = (ymax - ymin) * (xmax - xmin)

        if iou_score >= 0.5:
            # Positive sample.
            positive_list.append(rect)
        elif 0 < iou_score and rect_size > maximum_bndbox_size / 5.0:
            # Negative sample (elif implies iou_score < 0.5 here).
            negative_list.append(rect)

    return positive_list, negative_list
# === Example #4 (示例#4) ===
def parse_annotation_jpeg(annotation_path, jpeg_path, gs):
    """Collect positive/negative region proposals for one image.

    (Boxes flagged ``difficult`` are assumed to be dropped by ``parse_xml``.)

    Positive samples: proposals with IoU >= 0.5, plus the ground-truth
    boxes themselves.
    Negative samples: 0.1 <= IoU < 0.5.

    Returns (positive_list, negative_list).
    """
    img = cv2.imread(jpeg_path)

    selectivesearch.config(gs, img, strategy='q')
    # Candidate region proposals.
    rects = selectivesearch.get_rects(gs)
    # Ground-truth bounding boxes.
    bndboxs = parse_xml(annotation_path)

    # IoU of every proposal against the ground-truth boxes.
    iou_list = compute_ious(rects, bndboxs)

    positive_list = list()
    negative_list = list()
    for rect, iou_score in zip(rects, iou_list):
        if iou_score >= 0.5:
            # Positive sample.
            positive_list.append(rect)
        elif 0.1 <= iou_score:
            # Negative sample (elif implies iou_score < 0.5 here).
            negative_list.append(rect)

    # The ground-truth boxes themselves also count as positives.
    positive_list.extend(bndboxs)

    return positive_list, negative_list
    # NOTE(review): fragment of a script __main__ body -- the enclosing
    # `if __name__ == '__main__':` header is not visible in this chunk.
    device = get_device()
    transform = get_transform()
    model = get_model(device=device)

    # Create the selective-search object.
    gs = selectivesearch.get_selective_search()

    # test_img_path = '../imgs/000007.jpg'
    # test_xml_path = '../imgs/000007.xml'
    test_img_path = '../imgs/000012.jpg'
    test_xml_path = '../imgs/000012.xml'

    img = cv2.imread(test_img_path)
    dst = copy.deepcopy(img)

    # Draw every ground-truth box onto a copy of the image (green, 1 px).
    bndboxs = util.parse_xml(test_xml_path)
    for bndbox in bndboxs:
        xmin, ymin, xmax, ymax = bndbox
        cv2.rectangle(dst, (xmin, ymin), (xmax, ymax),
                      color=(0, 255, 0),
                      thickness=1)

    # Compute region proposals.
    selectivesearch.config(gs, img, strategy='f')
    rects = selectivesearch.get_rects(gs)
    print('候选区域建议数目: %d' % len(rects))

    # softmax = torch.softmax()

    # Classification score threshold for the SVM stage.
    svm_thresh = 0.60
# === Example #6 (示例#6) ===
if __name__ == '__main__':
    # NOTE(review): this __main__ body is truncated by a splice in the
    # source -- the `for rect in rects:` loop at the end has no body here.
    device = get_device()
    transform = get_transform()
    model = get_model(device=device)

    # Create the selective-search object.
    gs = selectivesearch.get_selective_search()

    test_img_path = './data/voc_car/val/JPEGImages/000007.jpg'
    test_xml_path = './data/voc_car/val/Annotations/000007.xml'

    img = cv2.imread(test_img_path)
    dst = copy.deepcopy(img)

    # Draw every ground-truth box onto a copy of the image (green, 1 px).
    bndboxs = parse_xml(test_xml_path)
    for bndbox in bndboxs:
        xmin, ymin, xmax, ymax = bndbox
        cv2.rectangle(dst, (xmin, ymin), (xmax, ymax), color=(0, 255, 0), thickness=1)

    # Compute region proposals.
    selectivesearch.config(gs, img, strategy='f')
    rects = selectivesearch.get_rects(gs)
    print('候选区域建议数目: %d' % len(rects))

    # softmax = torch.softmax()

    # Collect positive-sample boxes and their scores
    # (original comment was truncated).
    score_list = list()
    positive_list = list()
    for rect in rects:
    # NOTE(review): fragment from a different script body -- it begins
    # mid-function after a splice; names such as dst_root_dir and
    # positive_annotation_dir are defined outside this view.
    util.check_dir(dst_root_dir)
    util.check_dir(dst_jpeg_dir)
    util.check_dir(dst_bndbox_dir)
    util.check_dir(dst_positive_dir)

    samples = util.parse_car_csv(voc_car_train_dir)
    res_samples = list()
    total_positive_num = 0
    for sample_name in samples:
        # Load positive-sample box coordinates (IoU >= 0.5).
        positive_annotation_path = os.path.join(positive_annotation_dir, sample_name + '_1.csv')
        # NOTE(review): np.int was removed in NumPy >= 1.24 -- use int
        # (or np.int64) if this ever runs on a modern NumPy.
        positive_bndboxes = np.loadtxt(positive_annotation_path, dtype=np.int, delimiter=' ')
        # Load the ground-truth bounding boxes.
        gt_annotation_path = os.path.join(gt_annotation_dir, sample_name + '.xml')
        bndboxs = util.parse_xml(gt_annotation_path)
        # Keep only proposals whose best IoU against ground truth > 0.6.
        positive_list = list()
        if len(positive_bndboxes.shape) == 1 and len(positive_bndboxes) != 0:
            # loadtxt returned a single box as a 1-D array.
            scores = util.iou(positive_bndboxes, bndboxs)
            if np.max(scores) > 0.6:
                positive_list.append(positive_bndboxes)
        elif len(positive_bndboxes.shape) == 2:
            # Usual case: one row per candidate box.
            for positive_bndboxe in positive_bndboxes:
                scores = util.iou(positive_bndboxe, bndboxs)
                if np.max(scores) > 0.6:
                    positive_list.append(positive_bndboxe)
        else:
            pass

        # If any positive boxes (IoU > 0.6) remain, save the image and its
        # annotated boxes.  (Original comment cut off at the chunk edge.)