def mergesingle(dstpath, nms, fullname):
    name = util.custombasename(fullname)
    #print('name:', name)
    dstname = os.path.join(dstpath, name + '.txt')
    with open(fullname, 'r') as f_in:
        nameboxdict = {}
        lines = f_in.readlines()
        splitlines = [x.strip().split(' ') for x in lines]
        for splitline in splitlines:
            oriname = splitline[0]
            confidence = splitline[1]
            if float(confidence) < 0.5:
                continue
            points = list(map(float, splitline[2:]))
            rect = cv2.minAreaRect(
                np.float32([[points[0], points[1]], [points[2], points[3]],
                            [points[4], points[5]], [points[6], points[7]]]))
            x_ctr, y_ctr, width, height, angle = rect[0][0], rect[0][1], rect[
                1][0], rect[1][1], rect[2]
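            # normalize the minAreaRect output so that width is the long side
            # (assumes the pre-OpenCV-4.5 angle convention of [-90, 0))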
            if angle == -90:
                if width <= height:
                    angle = 0
                    width, height = height, width
                else:
                    angle = 90
            elif width >= height:
                angle = -angle
            else:
                angle = -(90 + angle)
                width, height = height, width

            det = list()
            det.extend([x_ctr, y_ctr, width, height, angle])
            det.append(confidence)
            det = list(map(float, det))
            if (oriname not in nameboxdict):
                nameboxdict[oriname] = []
            nameboxdict[oriname].append(det)
        nameboxnmsdict = nmsbynamedict(nameboxdict, nms, nms_thresh)
        with open(dstname, 'w') as f_out:
            print('dstname {}'.format(dstname))
            for imgname in nameboxnmsdict:
                for det in nameboxnmsdict[imgname]:
                    #print('det:', det)
                    confidence = det[-1]
                    rect = det[0:-1]

                    # boxPoints turns (center, size, angle) back into 4 corner points; the
                    # angle is negated, presumably to undo the sign flip applied above when
                    # the detections were converted with minAreaRect
                    box = cv2.boxPoints(
                        ((rect[0], rect[1]), (rect[2], rect[3]), -rect[4]))
                    box = np.reshape(box, [-1])
                    bbox = [
                        box[0], box[1], box[2], box[3], box[4], box[5], box[6],
                        box[7]
                    ]
                    outline = imgname + ' ' + str(confidence) + ' ' + ' '.join(
                        map(str, bbox))
                    #print('outline:', outline)
                    f_out.write(outline + '\n')
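
# A minimal usage sketch, not part of the original snippet: mergesingle above processes one
# per-class result file, so it is typically driven with the same partial/pool.map pattern used
# elsewhere on this page. srcpath and num_process here are assumptions.
from functools import partial
from multiprocessing import Pool

def merge_parallel(srcpath, dstpath, nms, num_process=16):
    pool = Pool(num_process)
    filelist = util.GetFileFromThisRootDir(srcpath)  # one result txt per class
    worker = partial(mergesingle, dstpath, nms)      # pool.map supplies fullname
    pool.map(worker, filelist)
    pool.close()
    pool.join()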
Example #2
def DOTA2COCOTest(srcpath, destfile, cls_names):
    imageparent = os.path.join(srcpath, 'images')
    data_dict = {}

    data_dict['images'] = []
    data_dict['categories'] = []
    for idex, name in enumerate(cls_names):
        single_cat = {'id': idex + 1, 'name': name, 'supercategory': name}
        data_dict['categories'].append(single_cat)

    image_id = 1
    with open(destfile, 'w') as f_out:
        filenames = util.GetFileFromThisRootDir(imageparent)
        for file in filenames:
            basename = util.custombasename(file)
            imagepath = os.path.join(imageparent, basename + '.tif')
            img = Image.open(imagepath)
            height = img.height
            width = img.width

            single_image = {}
            single_image['file_name'] = basename + '.tif'
            single_image['id'] = image_id
            single_image['width'] = width
            single_image['height'] = height
            data_dict['images'].append(single_image)

            image_id = image_id + 1
        json.dump(data_dict, f_out)
Example #3
    def splitdata(self, rate):
        """
        :param rate: resize rate before cut
        """

        imagelist = GetFileFromThisRootDir(self.imagepath)
        imagenames = [
            util.custombasename(x) for x in imagelist
            if (util.custombasename(x) != 'Thumbs')
        ]

        worker = partial(self.SplitSingle, rate=rate, extent=self.ext)
        #
        # for name in imagenames:
        #     self.SplitSingle(name, rate, self.ext)
        self.pool.map(worker, imagenames)
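
# A usage sketch (assumption, not part of the original snippet): splitdata is a method of a
# splitbase-style class, so it is normally driven as in the prepare() example further down
# this page, e.g.:
#   split = ImgSplit_multi_process.splitbase(srcpath, dstpath, gap=200, subsize=1024, num_process=8)
#   split.splitdata(1)     # cut at the original scale
#   split.splitdata(0.5)   # additionally cut at half resolution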
Example #4
def mergesingle(dstpath, nms, fullname):
    name = util.custombasename(fullname)
    #print('name:', name)
    dstname = os.path.join(dstpath, name + '.txt')
    with open(fullname, 'r') as f_in:
        nameboxdict = {}
        lines = f_in.readlines()
        splitlines = [x.strip().split(' ') for x in lines]
        for splitline in splitlines:
            oriname = splitline[0]
            confidence = splitline[1]
            if float(confidence) < 0.5:
                continue
            det = list(map(float, splitline[2:]))
            det.append(confidence)
            det = list(map(float, det))
            if (oriname not in nameboxdict):
                nameboxdict[oriname] = []
            nameboxdict[oriname].append(det)
        nameboxnmsdict = nmsbynamedict(nameboxdict, nms, nms_thresh)
        with open(dstname, 'w') as f_out:
            print('dstname {}'.format(dstname))
            for imgname in nameboxnmsdict:
                for det in nameboxnmsdict[imgname]:
                    #print('det:', det)
                    confidence = det[-1]
                    bbox = det[0:-1]
                    outline = imgname + ' ' + str(confidence) + ' ' + ' '.join(map(str, bbox))
                    #print('outline:', outline)
                    f_out.write(outline + '\n')
Example #5
def DOTA2COCO(srcpath, destfile):
    imageparent = os.path.join(srcpath, 'images3')
    labelparent = os.path.join(srcpath, 'labelTxt3')

    data_dict = {}
    info = {
        'contributor': 'captain group',
        'data_created': '2018',
        'description': 'This is 1.0 version of DOTA dataset.',
        'url': 'http://captain.whu.edu.cn/DOTAweb/',
        'version': '1.0',
        'year': 2018
    }
    data_dict['info'] = info
    data_dict['images'] = []
    data_dict['categories'] = []
    data_dict['annotations'] = []
    for idex, name in enumerate(wordname_15):
        single_cat = {'id': idex + 1, 'name': name, 'supercategory': name}
        data_dict['categories'].append(single_cat)

    inst_count = 1
    image_id = 1
    with open(destfile, 'w') as f_out:
        filenames = util.GetFileFromThisRootDir(labelparent)
        for file in filenames:
            basename = util.custombasename(file)
            # image_id = int(basename[1:])

            imagepath = os.path.join(imageparent, basename + '.png')
            img = cv2.imread(imagepath)
            height, width, c = img.shape

            single_image = {}
            single_image['file_name'] = basename + '.png'
            single_image['id'] = image_id
            single_image['width'] = width
            single_image['height'] = height
            data_dict['images'].append(single_image)

            # annotations
            objects = util.parse_dota_poly2(file)
            for obj in objects:
                single_obj = {}
                single_obj['area'] = obj['area']
                single_obj['category_id'] = wordname_15.index(obj['name']) + 1
                single_obj['segmentation'] = []
                single_obj['segmentation'].append(obj['poly'])
                single_obj['iscrowd'] = 0
                xmin, ymin, xmax, ymax = min(obj['poly'][0::2]), min(obj['poly'][1::2]), \
                                         max(obj['poly'][0::2]), max(obj['poly'][1::2])

                width, height = xmax - xmin, ymax - ymin
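                # COCO's 'bbox' field is the axis-aligned box [xmin, ymin, width, height]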
                single_obj['bbox'] = xmin, ymin, width, height
                single_obj['image_id'] = image_id
                data_dict['annotations'].append(single_obj)
                single_obj['id'] = inst_count
                inst_count = inst_count + 1
            image_id = image_id + 1
        json.dump(data_dict, f_out)
Example #6
def imageformatTrans(srcpath, dstpath, format):
    filelist = util.GetFileFromThisRootDir(srcpath)
    for fullname in filelist:
        img = cv2.imread(fullname)
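        # note: cv2.imread returns None for files it cannot decode; a guard may be needed before imwrite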
        basename = util.custombasename(fullname)
        dstname = os.path.join(dstpath, basename + format)
        cv2.imwrite(dstname, img)
Example #7
def DOTA2COCOTrain(srcpath, destfile, cls_names, difficult='2'):
    # set difficult to '2' or '1' to skip annotations with that difficulty; set '-1' to keep everything

    imageparent = os.path.join(srcpath, 'images')
    labelparent = os.path.join(srcpath, 'labelTxt-v1.0')

    data_dict = {}
    data_dict['images'] = []
    data_dict['categories'] = []
    data_dict['annotations'] = []
    for idex, name in enumerate(cls_names):
        single_cat = {'id': idex + 1, 'name': name, 'supercategory': name}
        data_dict['categories'].append(single_cat)

    inst_count = 1
    image_id = 1
    with open(destfile, 'w') as f_out:
        filenames = util.GetFileFromThisRootDir(labelparent)
        for file in filenames:
            basename = util.custombasename(file)
            # image_id = int(basename[1:])

            imagepath = os.path.join(imageparent, basename + '.png')
            img = Image.open(imagepath)
            height = img.height
            width = img.width
            # img = cv2.imread(imagepath)
            # height, width, c = img.shape

            single_image = {}
            single_image['file_name'] = basename + '.png'
            single_image['id'] = image_id
            single_image['width'] = width
            single_image['height'] = height
            data_dict['images'].append(single_image)

            # annotations
            objects = util.parse_dota_poly2(file)
            for obj in objects:
                if obj['difficult'] == difficult:
                    print('difficult: ', difficult)
                    continue
                single_obj = {}
                single_obj['area'] = obj['area']
                single_obj['category_id'] = cls_names.index(obj['name']) + 1
                single_obj['segmentation'] = []
                single_obj['segmentation'].append(obj['poly'])
                single_obj['iscrowd'] = 0
                xmin, ymin, xmax, ymax = min(obj['poly'][0::2]), min(obj['poly'][1::2]), \
                                         max(obj['poly'][0::2]), max(obj['poly'][1::2])

                width, height = xmax - xmin, ymax - ymin
                single_obj['bbox'] = xmin, ymin, width, height
                single_obj['image_id'] = image_id
                data_dict['annotations'].append(single_obj)
                single_obj['id'] = inst_count
                inst_count = inst_count + 1
            image_id = image_id + 1
        json.dump(data_dict, f_out)
Example #8
 def createIndex(self):
     for filename in self.imgpaths:
         objects = util.parse_dota_poly(filename)
         imgid = util.custombasename(filename)
         self.ImgToAnns[imgid] = objects
         for obj in objects:
             cat = obj['name']
             self.catToImgs[cat].append(imgid)
Example #9
def rotate(srcpath, dstpath, num_process=16):

    pool = Pool(num_process)
    imgnames = util.GetFileFromThisRootDir(os.path.join(srcpath, 'images'))
    names = [util.custombasename(x) for x in imgnames]

    rotate_fun = partial(rotate_single_run, srcpath=srcpath, dstpath=dstpath)

    pool.map(rotate_fun, names)
Example #10
 def __init__(self, basepath):
     self.basepath = basepath
     self.labelpath = os.path.join(basepath, 'labelTxt')
     self.imagepath = os.path.join(basepath, 'images')
     self.imgpaths = util.GetFileFromThisRootDir(self.labelpath)
     self.imglist = [util.custombasename(x) for x in self.imgpaths]
     self.catToImgs = defaultdict(list)
     self.ImgToAnns = defaultdict(list)
     self.createIndex()
Example #11
 def createIndex(self):
     for filename in self.imgpaths:
         objects = util.parse_dota_poly(filename)  # list of objects; each has name, difficult, poly, area
         imgid = util.custombasename(filename)  # file path with the extension stripped
         self.ImgToAnns[imgid] = objects  # image id -> annotation objects
         for obj in objects:
             cat = obj['name']
             self.catToImgs[cat].append(imgid)  # category -> image ids
Example #12
 def __init__(self, basepath):
     self.basepath = basepath
     self.labelpath = os.path.join(basepath, 'labelTxt')
     self.imagepath = os.path.join(basepath, 'images')
     self.imgpaths = util.GetFileFromThisRootDir(self.labelpath)
     self.imglist = [util.custombasename(x) for x in self.imgpaths]  # basenames with extensions stripped
     self.catToImgs = defaultdict(list)  # defaultdict takes a factory: list -> [], int -> 0, str -> '', set -> set()
     self.ImgToAnns = defaultdict(list)
     self.createIndex()  # build the category -> image ids and image id -> object list indices
Example #13
def Aerial2COCOTrain(srcpath, destfile, cls_names):

    imageparent = os.path.join(srcpath, 'images')
    labelparent = os.path.join(srcpath, 'labelTxt')

    data_dict = {}
    data_dict['images'] = []
    data_dict['categories'] = []
    data_dict['annotations'] = []

    for idex, name in enumerate(cls_names):
        single_cat = {'id': idex + 1, 'name': name, 'supercategory': name}
        data_dict['categories'].append(single_cat)

    inst_count = 1
    image_id = 1

    with open(destfile, 'w') as f_out:
        filenames = util.GetFileFromThisRootDir(labelparent)
        for file in filenames:
            basename = util.custombasename(file)

            imagepath = os.path.join(imageparent, basename + '.tif')

            img = cv2.imread(imagepath)
            height, width, c = img.shape

            single_image = {}
            single_image['file_name'] = basename + '.tif'
            single_image['id'] = image_id
            single_image['width'] = width
            single_image['height'] = height
            data_dict['images'].append(single_image)

            # annotations
            objects = parse_aerial_poly2(file)
            for obj in objects:
                single_obj = {}
                single_obj['area'] = obj['area']
                single_obj['category_id'] = cls_names.index(obj['name']) + 1
                single_obj['segmentation'] = []
                single_obj['segmentation'].append(obj['poly'])
                single_obj['iscrowd'] = 0
                xmin, ymin, xmax, ymax = min(obj['poly'][0::2]), min(obj['poly'][1::2]), \
                                         max(obj['poly'][0::2]), max(obj['poly'][1::2])

                width, height = xmax - xmin, ymax - ymin
                single_obj['bbox'] = xmin, ymin, width, height
                single_obj['image_id'] = image_id
                data_dict['annotations'].append(single_obj)
                single_obj['id'] = inst_count
                inst_count = inst_count + 1
            image_id = image_id + 1
        json.dump(data_dict, f_out, indent=2)
    def splitdata(self, rate):
        """
        :param rate: resize rate before cut
        """
        imagelist = GetFileFromThisRootDir(self.imagepath)
        imagenames = [
            util.custombasename(x) for x in imagelist
            if (util.custombasename(x) != 'Thumbs')
        ]
        if self.num_process == 1:
            for name in imagenames:
                self.SplitSingle(name, rate, self.ext)
        else:

            # worker = partial(self.SplitSingle, rate=rate, extent=self.ext)
            worker = partial(split_single_warp,
                             split_base=self,
                             rate=rate,
                             extent=self.ext)
            self.pool.map(worker, imagenames)
Example #15
def mergebase(srcpath, dstpath, nms):
    if not os.path.exists(dstpath):
        os.makedirs(dstpath)
    img_result = {}
    filelist = util.GetFileFromThisRootDir(srcpath)
    for fullname in filelist:
        name = util.custombasename(fullname)
        #print('name:', name)
        dstname = os.path.join(dstpath, name + '.txt')
        with open(fullname, 'r') as f_in:
            nameboxdict = {}
            lines = f_in.readlines()
            splitlines = [x.strip().split(' ') for x in lines]
            for splitline in splitlines:
                subname = splitline[0]
                splitname = subname.split('__')
                oriname = splitname[0]
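                # sub-image names presumably follow '{origname}__{rate}__{x}___{y}', which the patterns below unpack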
                pattern1 = re.compile(r'__\d+___\d+')
                #print('subname:', subname)
                x_y = re.findall(pattern1, subname)
                x_y_2 = re.findall(r'\d+', x_y[0])
                x, y = int(x_y_2[0]), int(x_y_2[1])

                pattern2 = re.compile(r'__([\d+\.]+)__\d+___')

                rate = re.findall(pattern2, subname)[0]

                confidence = splitline[1]
                poly = list(map(float, splitline[2:]))
                origpoly = poly2origpoly(poly, x, y, rate)
                det = origpoly
                det.append(confidence)
                det = list(map(float, det))
                if (oriname not in nameboxdict):
                    nameboxdict[oriname] = []
                nameboxdict[oriname].append(det)
            nameboxnmsdict = nmsbynamedict(nameboxdict, nms, nms_thresh)
            with open(dstname, 'w') as f_out:
                for imgname in nameboxnmsdict:
                    if (imgname not in img_result):
                        img_result[imgname] = {}
                    if (name not in img_result[imgname]):
                        img_result[imgname][name] = []
                    for det in nameboxnmsdict[imgname]:
                        img_result[imgname][name].append(det)
                        #print('det:', det)
                        confidence = det[-1]
                        bbox = det[0:-1]
                        outline = imgname + ' ' + str(
                            confidence) + ' ' + ' '.join(map(str, bbox))
                        #print('outline:', outline)
                        f_out.write(outline + '\n')
    return img_result
def mergesingle(dstpath, nms, nms_thresh, fullname):
    name = util.custombasename(fullname)
    # print("!!!!!!!!!!!!")
    # print('name:', name)
    # print('fullname: ', fullname)  work_dirs/faster_rcnn_obb_r50_fpn_1x_ssdd/Task1_results/s.txt

    dstname = os.path.join(dstpath, name + '.txt')
    with open(fullname, 'r') as f_in:

        nameboxdict = {}
        lines = f_in.readlines()
        splitlines = [x.strip().split(' ') for x in lines]
        for splitline in splitlines:
            # print(splitline)
            subname = splitline[0]
            # print(subname)
            splitname = subname.split('__')
            # print(splitname)
            oriname = splitname[0]
            # print(oriname)
            # pattern1 = re.compile(r'__\d+___\d+')
            # #print('subname:', subname)
            # x_y = re.findall(pattern1, subname)

            # x_y_2 = re.findall(r'\d+', x_y[0])
            # x, y = int(x_y_2[0]), int(x_y_2[1])

            # pattern2 = re.compile(r'__([\d+\.]+)__\d+___')

            # rate = re.findall(pattern2, subname)[0]

            confidence = splitline[1]
            poly = list(map(float, splitline[2:]))
            # origpoly = poly2origpoly(poly, x, y, rate)
            det = poly
            det.append(confidence)
            det = list(map(float, det))
            if (oriname not in nameboxdict):
                nameboxdict[oriname] = []
            nameboxdict[oriname].append(det)
        nameboxnmsdict = nmsbynamedict(nameboxdict, nms, nms_thresh)
        with open(dstname, 'w') as f_out:
            for imgname in nameboxnmsdict:
                for det in nameboxnmsdict[imgname]:
                    #print('det:', det)
                    confidence = det[-1]
                    bbox = det[0:-1]
                    outline = imgname + ' ' + str(confidence) + ' ' + ' '.join(
                        map(str, bbox))
                    #print('outline:', outline)
                    f_out.write(outline + '\n')
def mergesingle(dstpath, nms, fullname):
    """
    Merge the detections of a single result file (one call per fullname).
    :param dstpath:
    :param nms:
    :param fullname:
    :return:
    """
    name = util.custombasename(fullname)
    # print('name:', name)
    dstname = os.path.join(dstpath, name + '.txt')  # dstname collects the detections of one class over all images
    with open(fullname, 'r') as f_in:
        nameboxdict = {}
        lines = f_in.readlines()
        splitlines = [x.strip().split(' ') for x in lines]
        for splitline in splitlines:
            subname = splitline[0]  # sub-image name
            splitname = subname.split('__')  # split the sub-image name on '__'
            oriname = splitname[0]  # original image name
            pattern1 = re.compile(r'__\d+___\d+')
            # print('subname:', subname)
            x_y = re.findall(pattern1, subname)
            x_y_2 = re.findall(r'\d+', x_y[0])
            x, y = int(x_y_2[0]), int(x_y_2[1])  # offset of the sub-image within the original image

            pattern2 = re.compile(r'__([\d+\.]+)__\d+___')

            rate = re.findall(pattern2, subname)[0]  # resize rate (as a string)

            confidence = splitline[1]
            poly = list(map(float, splitline[2:]))
            origpoly = poly2origpoly(poly, x, y, rate)
            det = origpoly  # polygon mapped back to the original image: [x1, y1, x2, y2, x3, y3, x4, y4]
            det.append(confidence)  # [x1, y1, x2, y2, x3, y3, x4, y4, confidence]
            det = list(map(float, det))
            if (oriname not in nameboxdict):  # nameboxdict maps original image name -> list of dets
                nameboxdict[oriname] = []
            nameboxdict[oriname].append(det)
        nameboxnmsdict = nmsbynamedict(nameboxdict, nms, nms_thresh)  # run NMS per original image
        with open(dstname, 'w') as f_out:
            for imgname in nameboxnmsdict:
                for det in nameboxnmsdict[imgname]:
                    # print('det:', det)
                    confidence = det[-1]
                    bbox = det[0:-1]
                    outline = imgname + ' ' + str(confidence) + ' ' + ' '.join(
                        map(str, bbox))  # original image name, confidence, 8 coordinates
                    # print('outline:', outline)
                    f_out.write(outline + '\n')
Example #18
def mergebase(srcpath, dstpath, nms):
    """
    Read the per-sub-image detections and merge them back into per-original-image results;
    the sub-image names still carry the split-time underscores.
    :param srcpath:
    :param dstpath:
    :param nms:
    :return:
    """
    filelist = util.GetFileFromThisRootDir(srcpath)
    for fullname in filelist:
        name = util.custombasename(fullname)  # strip the extension
        # print('name:', name)
        dstname = os.path.join(dstpath, name + '.txt')  # destination file for this class
        with open(fullname, 'r') as f_in:
            nameboxdict = {}
            lines = f_in.readlines()
            splitlines = [x.strip().split(' ') for x in lines]
            for splitline in splitlines:
                subname = splitline[0]  # sub-image name
                splitname = subname.split('__')
                oriname = splitname[0]  # original image name
                pattern1 = re.compile(r'__\d+___\d+')
                # print('subname:', subname)
                x_y = re.findall(pattern1, subname)  # returns a single-element list, hence the [0] below
                x_y_2 = re.findall(r'\d+', x_y[0])
                x, y = int(x_y_2[0]), int(x_y_2[1])  # recover the x, y offsets

                pattern2 = re.compile(r'__([\d+\.]+)__\d+___')

                rate = re.findall(pattern2, subname)[0]  # resize rate

                confidence = splitline[1]  # confidence
                poly = list(map(float, splitline[2:]))  # polygon points
                origpoly = poly2origpoly(poly, x, y, rate)  # map the polygon back to the original image
                det = origpoly
                det.append(confidence)  # [8 coordinates, confidence]
                det = list(map(float, det))
                if (oriname not in nameboxdict):
                    nameboxdict[oriname] = []
                nameboxdict[oriname].append(det)  # detections grouped by original image
            nameboxnmsdict = nmsbynamedict(nameboxdict, nms, nms_thresh)  # run NMS per original image
            with open(dstname, 'w') as f_out:
                for imgname in nameboxnmsdict:
                    for det in nameboxnmsdict[imgname]:
                        # print('det:', det)
                        confidence = det[-1]
                        bbox = det[0:-1]
                        outline = imgname + ' ' + str(confidence) + ' ' + ' '.join(map(str, bbox))
                        # print('outline:', outline)
                        f_out.write(outline + '\n')
Example #19
File: ICDAR2COCO.py  Project: llmir/toolbox
def HRSC2COCOTest(srcpath, destfile, cls_names, test_set_file):
    imageparent = os.path.join(srcpath, 'images')
    # labelparent = os.path.join(srcpath, 'labelTxt')
    data_dict = {}
    info = {
        'contributor': 'Jian Ding',
        'data_created': '2019',
        'description': 'This is HRSC.',
        'url': 'http://captain.whu.edu.cn/DOTAweb/',
        'version': '1.0',
        'year': 2018
    }
    data_dict['info'] = info
    data_dict['images'] = []
    data_dict['categories'] = []
    for idex, name in enumerate(cls_names):
        single_cat = {'id': idex + 1, 'name': name, 'supercategory': name}
        data_dict['categories'].append(single_cat)

    inst_count = 1
    image_id = 1
    with open(destfile, 'w') as f_out:
        # filenames = util.GetFileFromThisRootDir(imageparent)
        with open(test_set_file, 'r') as f_in:
            lines = f_in.readlines()
            filenames = [
                os.path.join(imageparent, x.strip()) + '.bmp' for x in lines
            ]

        for file in filenames:
            basename = util.custombasename(file)
            # image_id = int(basename[1:])

            imagepath = os.path.join(imageparent, basename + '.bmp')
            # img = cv2.imread(imagepath)
            img = Image.open(imagepath)
            # height, width, c = img.shape
            height = img.height
            width = img.width

            single_image = {}
            single_image['file_name'] = basename + '.bmp'
            single_image['id'] = image_id
            single_image['width'] = width
            single_image['height'] = height
            data_dict['images'].append(single_image)

            image_id = image_id + 1
        json.dump(data_dict, f_out)
Example #20
def mergesingle(dstpath, nms, fullname):
    """
    Merge the detections of a single result file (one call per fullname).
    :param dstpath:
    :param nms:
    :param fullname:
    :return:
    """
    name = util.custombasename(fullname)
    # print('name:', name)
    dstname = os.path.join(dstpath, name + '.txt')
    with open(fullname, 'r') as f_in:
        nameboxdict = {}
        lines = f_in.readlines()
        splitlines = [x.strip().split(' ') for x in lines]
        for splitline in splitlines:
            subname = splitline[0]
            splitname = subname.split('__')
            oriname = splitname[0]
            pattern1 = re.compile(r'__\d+___\d+')
            # print('subname:', subname)
            x_y = re.findall(pattern1, subname)
            x_y_2 = re.findall(r'\d+', x_y[0])
            x, y = int(x_y_2[0]), int(x_y_2[1])

            pattern2 = re.compile(r'__([\d+\.]+)__\d+___')

            rate = re.findall(pattern2, subname)[0]

            confidence = splitline[1]
            poly = list(map(float, splitline[2:]))
            origpoly = poly2origpoly(poly, x, y, rate)
            det = origpoly
            det.append(confidence)
            det = list(map(float, det))
            if (oriname not in nameboxdict):
                nameboxdict[oriname] = []
            nameboxdict[oriname].append(det)
        nameboxnmsdict = nmsbynamedict(nameboxdict, nms, nms_thresh)
        with open(dstname, 'w') as f_out:
            for imgname in nameboxnmsdict:
                for det in nameboxnmsdict[imgname]:
                    # print('det:', det)
                    confidence = det[-1]
                    bbox = det[0:-1]
                    outline = imgname + ' ' + str(confidence) + ' ' + ' '.join(map(str, bbox))
                    # print('outline:', outline)
                    f_out.write(outline + '\n')
Example #21
def ICDAR2COCOTest(srcpath, destfile, cls_names):
    imageparent = os.path.join(srcpath, 'images')
    # labelparent = os.path.join(srcpath, 'labelTxt')
    data_dict = {}
    info = {
        'contributor': 'ming71',
        'data_created': '2020',
        'description': 'ICDAR15 dataset',
        'url': 'sss',
        'version': '1.0',
        'year': 2020
    }
    data_dict['info'] = info
    data_dict['images'] = []
    data_dict['categories'] = []
    for idex, name in enumerate(cls_names):
        single_cat = {'id': idex + 1, 'name': name, 'supercategory': name}
        data_dict['categories'].append(single_cat)

    inst_count = 1
    image_id = 1
    with open(destfile, 'w') as f_out:
        # filenames = util.GetFileFromThisRootDir(imageparent)
        filenames = glob.glob(imageparent + '/*.jpg')

        for file in tqdm(filenames):
            basename = util.custombasename(file)
            # image_id = int(basename[1:])

            imagepath = os.path.join(imageparent, basename + '.jpg')
            # img = cv2.imread(imagepath)
            img = Image.open(imagepath)
            # height, width, c = img.shape
            height = img.height
            width = img.width

            single_image = {}
            single_image['file_name'] = basename + '.jpg'
            single_image['id'] = image_id
            single_image['width'] = width
            single_image['height'] = height
            data_dict['images'].append(single_image)

            image_id = image_id + 1
        json.dump(data_dict, f_out)
def rotate(srcpath, dstpath):

    pool = Pool(16)
    imgnames = util.GetFileFromThisRootDir(os.path.join(srcpath, 'images'))
    names = [util.custombasename(x) for x in imgnames]

    dst_imgpath = os.path.join(dstpath, 'images')
    dst_labelTxt = os.path.join(dstpath, 'labelTxt')

    if not os.path.exists(dst_imgpath):
        os.mkdir(dst_imgpath)

    if not os.path.exists(dst_labelTxt):
        os.mkdir(dst_labelTxt)

    rotate_fun = partial(rotate_single_run, srcpath=srcpath, dstpath=dstpath)

    pool.map(rotate_fun, names)
Example #23
def extract_largesize_index(labelpath):
    filenames = util.GetFileFromThisRootDir(labelpath)
    large_size_index = []
    for name in filenames:
        objs = util.parse_dota_poly(name)
        flag = 0
        for obj in objs:
            poly = np.array(obj['poly'])
            xmin, ymin, xmax, ymax = np.min(poly[:, 0]), np.min(poly[:, 1]), np.max(poly[:, 0]), np.max(poly[:, 1])
            w = xmax - xmin
            h = ymax - ymin
            max_side = max(w, h)
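            # flag the image if any instance's long side exceeds 400 pixels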
            if max_side > 400:
                flag = 1
                break
        if flag:
            large_size_index.append(util.custombasename(name))
    # print('index:', large_size_index)
    # print('len:', len(large_size_index))

    return large_size_index
Example #24
def drwaBox_parallel(srcpath, imgpath, dstpath):
    pool = Pool(16)
    filelist = util.GetFileFromThisRootDir(srcpath)
    nameboxdict = {}
    for file in filelist:
        name = util.custombasename(file)
        with open(file, 'r') as f_in:
            lines = f_in.readlines()
            splitlines = [x.strip().split(' ') for x in lines]
            for splitline in splitlines:
                oriname = splitline[0]
                confidence = float(splitline[1])
                bbox = list(map(float, splitline[2:]))
                bbox.append(confidence)
                bbox.append(name.split('_')[-1])
                if (oriname not in nameboxdict):
                    nameboxdict[oriname] = []
                nameboxdict[oriname].append(bbox)

    drawBoxsingle_fn = partial(drawBoxsingle, imgpath, dstpath, nameboxdict)
    # pdb.set_trace()
    pool.map(drawBoxsingle_fn, nameboxdict)
Example #25
def ICDAR2COCOTrain(srcpath, destfile, cls_names):
    imageparent = os.path.join(srcpath, 'images')
    labelparent = os.path.join(srcpath, 'labels')

    data_dict = {}
    info = {
        'contributor': 'ming71',
        'data_created': '2020',
        'description': 'ICDAR15 dataset',
        'url': 'sss',
        'version': '1.0',
        'year': 2020
    }
    data_dict['info'] = info
    data_dict['images'] = []
    data_dict['categories'] = []
    data_dict['annotations'] = []
    for idex, name in enumerate(cls_names):
        single_cat = {'id': idex + 1, 'name': name, 'supercategory': name}
        data_dict['categories'].append(single_cat)

    inst_count = 1
    image_id = 1
    with open(destfile, 'w') as f_out:
        # filenames = util.GetFileFromThisRootDir(labelparent)
        filenames = glob.glob(labelparent + '/*.txt')

        for file in tqdm(filenames):
            basename = util.custombasename(file)
            # image_id = int(basename[1:])

            imagepath = os.path.join(imageparent, basename + '.jpg')

            img = Image.open(imagepath)
            height = img.height
            width = img.width

            single_image = {}
            single_image['file_name'] = basename + '.jpg'
            single_image['id'] = image_id
            single_image['width'] = width
            single_image['height'] = height
            data_dict['images'].append(single_image)

            # annotations
            objects = parse_icdar_poly2(file)
            for obj in objects:
                single_obj = {}
                single_obj['area'] = obj['area']
                single_obj['category_id'] = cls_names.index(obj['name']) + 1
                single_obj['segmentation'] = []
                single_obj['segmentation'].append(obj['poly'])
                single_obj['iscrowd'] = 0
                xmin, ymin, xmax, ymax = min(obj['poly'][0::2]), min(obj['poly'][1::2]), \
                                         max(obj['poly'][0::2]), max(obj['poly'][1::2])

                width, height = xmax - xmin, ymax - ymin
                single_obj['bbox'] = xmin, ymin, width, height
                single_obj['image_id'] = image_id
                data_dict['annotations'].append(single_obj)
                single_obj['id'] = inst_count
                inst_count = inst_count + 1
            image_id = image_id + 1
        json.dump(data_dict, f_out)
Example #26
def mergebase(srcpath, dstpath, nms, isTask2=True):
    filelist = util.GetFileFromThisRootDir(srcpath)
    for fullname in filelist:
        name = util.custombasename(fullname)
        #print('name:', name)
        dstname = os.path.join(dstpath, name + '.txt')
        with open(fullname, 'r') as f_in:
            nameboxdict = {}
            lines = f_in.readlines()
            splitlines = [x.strip().split(' ') for x in lines]
            for splitline in splitlines:
                subname = splitline[0]
                '''
                splitname = subname.split('__')
                oriname = splitname[0]
                pattern1 = re.compile(r'__\d+___\d+')
                #print('subname:', subname)
                x_y = re.findall(pattern1, subname)
                x_y_2 = re.findall(r'\d+', x_y[0])
                x, y = int(x_y_2[0]), int(x_y_2[1])

                pattern2 = re.compile(r'__([\d+\.]+)__\d+___')

                rate = re.findall(pattern2, subname)[0]
                '''
                splitname = subname.split('_')
                oriname = splitname[0]
                x = int(splitname[2])
                y = int(splitname[3])
                rate = int(splitname[1])

                confidence = splitline[1]
                poly = list(map(float, splitline[2:]))
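                # Task2 results are horizontal boxes [x, y, w, h]; expand them to a 4-corner polygon first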
                if isTask2:
                    x1 = np.round(poly[0])
                    y1 = np.round(poly[1])
                    x2 = np.round(poly[0] + poly[2] - 1)
                    y2 = np.round(poly[1] + poly[3] - 1)
                    poly = [x1, y1, x2, y1, x2, y2, x1, y2]
                origpoly = poly2origpoly(poly, x, y, rate)
                det = origpoly
                det.append(confidence)
                det = list(map(float, det))
                if (oriname not in nameboxdict):
                    nameboxdict[oriname] = []
                nameboxdict[oriname].append(det)
            nameboxnmsdict = nmsbynamedict(nameboxdict, nms, nms_thresh)
            with open(dstname, 'w') as f_out:
                for imgname in nameboxnmsdict:
                    for det in nameboxnmsdict[imgname]:
                        #print('det:', det)
                        confidence = det[-1]
                        bbox = det[0:-1]
                        if not isTask2:
                            outline = imgname + ' ' + str(
                                confidence) + ' ' + ' '.join(map(str, bbox))
                        else:
                            outline = imgname + ' ' + str(
                                confidence) + ' ' + ' '.join(
                                    map(str,
                                        [bbox[0], bbox[1], bbox[4], bbox[5]]))
                        #print('outline:', outline)
                        f_out.write(outline + '\n')
Example #27
def prepare():
    args = parse_args()
    data_root_path = args.data_path

    train_path = os.path.join(data_root_path, 'train')
    val_path = os.path.join(data_root_path, 'val')
    test_path = os.path.join(data_root_path, 'test')

    if not os.path.exists(os.path.join(data_root_path, 'trainval_large')):
        os.makedirs(os.path.join(data_root_path, 'trainval_large'))
    if not os.path.exists(os.path.join(data_root_path, 'trainval_large', 'images')):
        os.makedirs(os.path.join(data_root_path, 'trainval_large', 'images'))
    if not os.path.exists(os.path.join(data_root_path, 'trainval_large', 'labelTxt')):
        os.makedirs(os.path.join(data_root_path, 'trainval_large', 'labelTxt'))

    if not os.path.exists(os.path.join(data_root_path, 'trainval1024_1')):
        os.makedirs(os.path.join(data_root_path, 'trainval1024_1'))

    split_train = ImgSplit_multi_process.splitbase(train_path,
                       os.path.join(data_root_path, 'trainval1024_1'),
                      gap=200,
                      subsize=1024,
                      num_process=args.num_process
                      )
    split_train.splitdata(1)

    split_val = ImgSplit_multi_process.splitbase(val_path,
                        os.path.join(data_root_path, 'trainval1024_1'),
                         gap=200,
                         subsize=1024,
                         num_process=args.num_process
                        )
    split_val.splitdata(1)

    # extract train images that contain large instances
    train_large_names = extract_largesize_index(os.path.join(data_root_path, 'train', 'labelTxt'))
    filecopy(os.path.join(data_root_path, 'train', 'labelTxt'),
             os.path.join(data_root_path, 'trainval_large', 'labelTxt'),
             train_large_names,
             '.txt',
             num_process=args.num_process)
    filecopy(os.path.join(data_root_path, 'train', 'images'),
             os.path.join(data_root_path, 'trainval_large', 'images'),
             train_large_names,
             '.png',
             num_process=args.num_process)

    # extract val images that contain large instances
    val_large_names = extract_largesize_index(os.path.join(data_root_path, 'val', 'labelTxt'))
    filecopy(os.path.join(data_root_path, 'val', 'labelTxt'),
             os.path.join(data_root_path, 'trainval_large', 'labelTxt'),
             val_large_names,
             '.txt',
             num_process=args.num_process)
    filecopy(os.path.join(data_root_path, 'val', 'images'),
             os.path.join(data_root_path, 'trainval_large', 'images'),
             val_large_names,
             '.png',
             num_process=args.num_process)

    # split the images that contain large-size instances
    if not os.path.exists(os.path.join(data_root_path, 'trainval_large_1024_0.4')):
        os.makedirs(os.path.join(data_root_path, 'trainval_large_1024_0.4'))
    split_trainval_large = ImgSplit_multi_process.splitbase(os.path.join(data_root_path, 'trainval_large'),
                                    os.path.join(data_root_path, 'trainval_large_1024_0.4'),
                                    gap=512,
                                    subsize=1024,
                                    num_process=args.num_process)
    split_trainval_large.splitdata(0.4)

    # rotate augment for images contain large size instances
    rotate_augment(os.path.join(data_root_path, 'trainval_large_1024_0.4'),
                   os.path.join(data_root_path, 'trainval_large_1024_0.4_rotate'))

    # copy files to images and labelTxt
    if not os.path.exists(os.path.join(data_root_path, 'images')):
        os.makedirs(os.path.join(data_root_path, 'images'))
    if not os.path.exists(os.path.join(data_root_path, 'labelTxt')):
        os.makedirs(os.path.join(data_root_path, 'labelTxt'))

    filemove_v2(os.path.join(data_root_path, 'trainval1024_1', 'images'),
                os.path.join(data_root_path, 'images'),
                '.png',
                num_process=args.num_process
                )
    filemove_v2(os.path.join(data_root_path, 'trainval1024_1', 'labelTxt'),
                os.path.join(data_root_path, 'labelTxt'),
                '.txt',
                num_process=args.num_process
                )

    filemove_v2(os.path.join(data_root_path, 'trainval_large_1024_0.4', 'images'),
                os.path.join(data_root_path, 'images'),
                '.png',
                num_process=args.num_process
                )
    filemove_v2(os.path.join(data_root_path, 'trainval_large_1024_0.4', 'labelTxt'),
                os.path.join(data_root_path, 'labelTxt'),
                '.txt',
                num_process=args.num_process
                )

    filemove_v2(os.path.join(data_root_path, 'trainval_large_1024_0.4_rotate', 'images'),
                os.path.join(data_root_path, 'images'),
                '.png',
                num_process=args.num_process
                )
    filemove_v2(os.path.join(data_root_path, 'trainval_large_1024_0.4_rotate', 'labelTxt'),
                os.path.join(data_root_path, 'labelTxt'),
                '.txt',
                num_process=args.num_process
                )

    train_without_balance = util.GetFileFromThisRootDir(os.path.join(data_root_path, 'labelTxt'))
    train_without_balance_names = [util.custombasename(x.strip()) for x in train_without_balance]

    # data balance
    with open('train_balance_extend.txt', 'r') as f_in:
        train_balance_names = f_in.readlines()
        train_balance_names = [x.strip() for x in train_balance_names]
    train_names = train_without_balance_names + train_balance_names
    with open(os.path.join(data_root_path, 'train.txt'), 'w') as f_out:
        for index, name in enumerate(train_names):
            if index == (len(train_names) - 1):
                f_out.write(name)
            else:
                f_out.write(name + '\n')

    # prepare test data
    if not os.path.exists(os.path.join(data_root_path, 'test1024')):
        os.makedirs(os.path.join(data_root_path, 'test1024'))

    split_test = SplitOnlyImage_multi_process.splitbase(os.path.join(test_path, 'images'),
                       os.path.join(data_root_path, 'test1024', 'images'),
                      gap=512,
                      subsize=1024,
                      num_process=args.num_process
                      )
    split_test.splitdata(1)
    split_test.splitdata(0.5)

    test_names = util.GetFileFromThisRootDir(os.path.join(data_root_path, 'test1024', 'images'))
    test_names = [util.custombasename(x.strip()) for x in test_names]

    with open(os.path.join(data_root_path, 'test.txt'), 'w') as f_out:
        for index, name in enumerate(test_names):
            if index == (len(test_names) - 1):
                f_out.write(name)
            else:
                f_out.write(name + '\n')

    filemove_v2(os.path.join(data_root_path, 'test1024', 'images'),
                os.path.join(data_root_path, 'images'),
                '.png',
                num_process=args.num_process)

    shutil.rmtree(os.path.join(data_root_path, r'trainval_large_1024_0.4'))
    shutil.rmtree(os.path.join(data_root_path, r'trainval_large_1024_0.4_rotate'))
    shutil.rmtree(os.path.join(data_root_path, r'test1024'))
    shutil.rmtree(os.path.join(data_root_path, r'trainval1024_1'))
    shutil.rmtree(os.path.join(data_root_path, r'trainval_large'))
def HRSC2COCOTrain(srcpath, destfile, cls_names, train_set_file):
    imageparent = os.path.join(srcpath, 'images')
    labelparent = os.path.join(srcpath, 'labelTxt_L1')

    data_dict = {}
    info = { 'contributor': 'Jian Ding',
             'data_created': '2019',
             'description': 'This is the L1 of HRSC',
             'url': 'sss',
             'version': '1.0',
             'year': 2019}
    data_dict['info'] = info
    data_dict['images'] = []
    data_dict['categories'] = []
    data_dict['annotations'] = []
    for idex, name in enumerate(cls_names):
        single_cat = {'id': idex + 1, 'name': name, 'supercategory': name}
        data_dict['categories'].append(single_cat)

    inst_count = 1
    image_id = 1
    with open(destfile, 'w') as f_out:
        # filenames = util.GetFileFromThisRootDir(labelparent)
        with open(train_set_file, 'r') as f_in:
            lines = f_in.readlines()
            filenames = [os.path.join(labelparent, x.strip()) + '.txt' for x in lines]

        for file in filenames:
            basename = util.custombasename(file)
            # image_id = int(basename[1:])

            imagepath = os.path.join(imageparent, basename + '.bmp')

            img = Image.open(imagepath)
            height = img.height
            width = img.width

            print('height: ', height)
            print('width: ', width)

            single_image = {}
            single_image['file_name'] = basename + '.bmp'
            single_image['id'] = image_id
            single_image['width'] = width
            single_image['height'] = height
            data_dict['images'].append(single_image)

            # annotations
            objects = util.parse_dota_poly2(file)
            for obj in objects:
                single_obj = {}
                single_obj['area'] = obj['area']
                single_obj['category_id'] = cls_names.index(obj['name']) + 1
                single_obj['segmentation'] = []
                single_obj['segmentation'].append(obj['poly'])
                single_obj['iscrowd'] = 0
                xmin, ymin, xmax, ymax = min(obj['poly'][0::2]), min(obj['poly'][1::2]), \
                                         max(obj['poly'][0::2]), max(obj['poly'][1::2])

                width, height = xmax - xmin, ymax - ymin
                single_obj['bbox'] = xmin, ymin, width, height
                single_obj['image_id'] = image_id
                data_dict['annotations'].append(single_obj)
                single_obj['id'] = inst_count
                inst_count = inst_count + 1
            image_id = image_id + 1
        json.dump(data_dict, f_out)
Example #29
import dota_kit.dota_utils as util
srcpath1 = '/mnt/lustre/yanhongchang/project/one-rpn/mmdetection/demo/work_dirs/out_img/faster_x101/crop512/txt2'
srcpath2 = '/mnt/lustre/yanhongchang/project/one-rpn/mmdetection/demo/work_dirs/out_img/faster_x101/crop1024/txt2'
srcpath3 = '/mnt/lustre/yanhongchang/project/one-rpn/mmdetection/demo/work_dirs/out_img/faster_x101/origin/txt2'
dstpath = '/mnt/lustre/yanhongchang/project/one-rpn/mmdetection/demo/work_dirs/out_img/faster_x101/tr_merge3/txt'


def osp(savepath):
    if not os.path.exists(savepath):
        os.makedirs(savepath)


osp(dstpath)
filelist = util.GetFileFromThisRootDir(srcpath1)
for filename in filelist:
    name = util.custombasename(filename)
    print(name)
    file_1 = open(os.path.join(srcpath1, name + '.txt'), 'r')
    file_2 = open(os.path.join(srcpath2, name + '.txt'), 'r')
    file_3 = open(os.path.join(srcpath3, name + '.txt'), 'r')
    file_new = open(os.path.join(dstpath, name + '.txt'), 'w')

    list1 = []
    for line in file_1.readlines():
        ss = line.strip()
        list1.append(ss)
    file_1.close()

    list2 = []
    for line in file_2.readlines():
        ss = line.strip()
Example #30
def filemove_v2(srcpath, dstpath, extent, num_process=32):
    filelist = util.GetFileFromThisRootDir(srcpath)
    filenames = [util.custombasename(x.strip()) for x in filelist]
    print('srcpath: ', srcpath)
    print('num: ', len(filenames))
    filemove(srcpath, dstpath, filenames, extent, num_process)