Code example #1
def preds_to_voc_format(img_name,
                        bboxes,
                        labels,
                        classes_selected,
                        class_names=[],
                        thresh=0.5):
    if (len(classes_selected) == 1 and classes_selected[0] == ""):
        classes_selected = class_names
    img = cv2.imread(img_name)
    h, w, c = img.shape
    writer = Writer(img_name, w, h)
    for j in range(len(bboxes)):
        x1 = int(bboxes[j][0])
        y1 = int(bboxes[j][1])
        x2 = int(bboxes[j][2])
        y2 = int(bboxes[j][3])
        score = float(bboxes[j][4])
        class_name = class_names[labels[j]]

        if (score >= thresh and class_name in classes_selected):
            writer.addObject(class_name, x1, y1, x2, y2)

    return writer
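The function above only builds the writer; it does not write the XML itself. A minimal usage sketch (the image path, `bboxes`, `labels` and `class_names` below are placeholders, not values from the original project):

# Hypothetical call: `bboxes` is an Nx5 array of [x1, y1, x2, y2, score] and
# `labels` holds the class index for each box.
writer = preds_to_voc_format("frame_0001.jpg", bboxes, labels,
                             classes_selected=[""], class_names=class_names)
writer.save("frame_0001.xml")  # pascal_voc_writer's Writer.save() renders the VOC XML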
Code example #2
def save_label_object(classes_id, rects, frame, camera_id, folderobject):
    (H, W) = frame.shape[0:2]
    now = datetime.datetime.now()
    # date_folder = now.strftime('%Y-%m-%d')
    date_folder = now.strftime('%Y_%m_%d_%H_%M_%S')
    ob_name = "f_{}_{}_{}".format(camera_id, date_folder,
                                  np.random.randint(100))
    xml_name = "{}.xml".format(ob_name)
    jpg_name = "{}.jpg".format(ob_name)
    print("save : {}".format(xml_name))
    xml_full = os.path.join(folderobject, xml_name)
    jpg_full = os.path.join(folderobject, jpg_name)

    writerXML = WriterXML(jpg_full, W, H)
    for index, (x, y, w, h) in enumerate(rects):
        xmin = max(0, x)
        ymin = max(0, y)
        xmax = min(x + w, W)
        ymax = min(y + h, H)

        label_ob = "{}".format(LABELS[classes_id[index]])
        writerXML.addObject(label_ob, xmin, ymin, xmax, ymax)

    # print(jpg_full)
    cv2.imwrite(jpg_full, frame)
    writerXML.save(xml_full)
Code example #3
    def save(self):
        if self.filenameBuffer is None:
            w, h = self.img.size
            self.writer = Writer(os.path.join(self.imageDirPathBuffer, self.imageList[self.cur]), w, h)
            self.annotation_file = open('annotations/' + self.anno_filename, 'a')
            for idx, item in enumerate(self.bboxList):
                x1, y1, x2, y2 = self.bboxList[idx]
                self.writer.addObject(str(self.objectLabelList[idx]), x1, y1, x2, y2)
                self.annotation_file.write(self.imageDirPathBuffer + '/' + self.imageList[self.cur] + ',' +
                                           ','.join(map(str, self.bboxList[idx])) + ',' + str(self.objectLabelList[idx])
                                           + '\n')
            self.annotation_file.close()
            baseName = os.path.splitext(self.imageList[self.cur])[0]
            save_dir = 'annotations/annotations_voc/'
            save_path = save_dir + baseName + '.xml'
            if not os.path.exists(save_dir):
                os.mkdir(save_dir)

            self.writer.save(save_path)
            self.writer = None
        else:
            w, h = self.img.size
            self.writer = Writer(self.filenameBuffer, w, h)
            self.annotation_file = open('annotations/' + self.anno_filename, 'a')
            for idx, item in enumerate(self.bboxList):
                x1, y1, x2, y2 = self.bboxList[idx]
                self.writer.addObject(str(self.objectLabelList[idx]), x1, y1, x2, y2)
                self.annotation_file.write(self.filenameBuffer + ',' + ','.join(map(str, self.bboxList[idx])) + ','
                                           + str(self.objectLabelList[idx]) + '\n')
            self.annotation_file.close()
            baseName = os.path.splitext(self.imageList[self.cur])[0]
            self.writer.save('annotations/annotations_voc/' + baseName + '.xml')
            self.writer = None
Code example #4
def save_data(artificial_image, semantic_label, obj_det_label, index,
              generator_options):
    """
    This function saves the artificial image and its corresponding semantic
    label. Also saves object detection labels, plot preview and segmentation
    mask images based on "generator_options".

    :param artificial_image: The artificial image which needs to be saved.
    :param semantic_label: The semantic segmentation label image which
                           needs to be saved.
    :param obj_det_label: The object detection label which needs to be
                          saved. Can be None if "save_obj_det_label" is false.
    :param index: The index value to be included in the name of the files.
    :param generator_options: Options object providing the save paths, the
                              name format and the flags that select which
                              optional outputs are written.
    :return: No returns.
    """
    cv2.imwrite(
        os.path.join(
            generator_options.get_image_save_path(),
            generator_options.get_name_format() %
            (index + generator_options.get_start_index()) + '.jpg'),
        artificial_image)

    cv2.imwrite(
        os.path.join(
            generator_options.get_label_save_path(),
            generator_options.get_name_format() %
            (index + generator_options.get_start_index()) + '.png'),
        semantic_label)
    if generator_options.get_save_obj_det_label():
        img_path = os.path.join(
            generator_options.get_image_save_path(),
            generator_options.get_name_format() %
            (index + generator_options.get_start_index()) + '.jpg')
        img_dimension = generator_options.image_dimension
        writer = Writer(img_path, img_dimension[0], img_dimension[1])
        for obj in obj_det_label:
            writer.addObject(*obj)
        save_path = os.path.join(
            generator_options.get_obj_det_save_path(),
            generator_options.get_name_format() %
            (index + generator_options.get_start_index()) + '.xml')
        writer.save(save_path)
        # with open(os.path.join(
        #         generator_options.obj_det_save_path,
        #         generator_options.name_format %
        #         (index + generator_options.start_index) + '.csv'), 'w') as f:
        #
        #     wr = csv.writer(f, delimiter=',')
        #     [wr.writerow(l) for l in obj_det_label]
    else:
        obj_det_label = None

    # main_window.progress_bar(len(index))
    # main_window.show()
    if (generator_options.get_save_mask()
            or generator_options.get_save_label_preview()
            or generator_options.get_save_overlay()):
        save_visuals(artificial_image, semantic_label, obj_det_label, index,
                     generator_options)
Code example #5
File: GenVOC.py  Project: ykhorzon/CommuTag
	def CreateContent(self,imageName,w,h,bboxArr):
		writer = Writer(imageName,w,h)
		for bbox in bboxArr:
			tag = bbox["tag"]
			minX = float(bbox["x"])
			minY = float(bbox["y"])
			maxX = minX+float(bbox["width"])
			maxY = minY+float(bbox["height"])
			writer.addObject(tag,minX,minY,maxX,maxY)
		return  writer.annotation_template.render(**writer.template_parameters)
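Unlike most of the snippets here, this method returns the rendered XML string instead of calling writer.save(). A short usage sketch (the instance name and the bbox dictionary are assumptions for illustration):

# Hypothetical usage: write the rendered annotation out by hand.
xml_string = gen.CreateContent("image_001.jpg", 640, 480,
                               [{"tag": "dog", "x": 10, "y": 20, "width": 100, "height": 80}])
with open("image_001.xml", "w") as f:
    f.write(xml_string)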
Code example #6
File: html2xml.py  Project: hadarohana/myCosmos
def htmlfile2xml(html_f_path, output_path):
    """
    Take as input an html file, and output back an xml from that representation
    :param html_f_path: Path to html file
    :param output_path: Path to output new xml
    """
    with codecs.open(html_f_path, "r", "utf-8") as fin:
        soup = BeautifulSoup(fin, 'html.parser')
        writer = Writer(f'{os.path.basename(html_f_path)[:-5]}.png', 1920, 1920)
        writer = iterate_and_update_writer(soup, writer)
        writer.save(f'{os.path.join(output_path, os.path.basename(html_f_path)[:-5])}.xml')
Code example #7
def write_label(  # pylint: disable-msg=too-many-arguments
        label_id: str, image_url: str, labels: Dict[str,
                                                    Any], label_format: str,
        images_output_dir: str, annotations_output_dir: str):
    """Writes a single Pascal VOC formatted image and label pair to disk.

    Args:
        label_id: ID for the instance to write
        image_url: URL to download image file from
        labels: Labelbox formatted labels to use for generating annotation
        label_format: Format of the labeled data. Valid options are: "WKT" and
                      "XY", default is "XY".
        annotations_output_dir: File path of directory to write Pascal VOC
                                annotation files.
        images_output_dir: File path of directory to write images.
    """
    # Download image and save it
    response = requests.get(image_url, stream=True, timeout=10.0)
    response.raw.decode_content = True
    image = Image.open(response.raw)
    image_fqn = os.path.join(
        images_output_dir, '{img_id}.{ext}'.format(img_id=label_id,
                                                   ext=image.format.lower()))
    image.save(image_fqn, format=image.format)

    # generate image annotation in Pascal VOC
    width, height = image.size
    xml_writer = PascalWriter(image_fqn, width, height)

    # remove classification labels (Skip, etc...)
    if not callable(getattr(labels, 'keys', None)):
        # skip if no categories (e.g. "Skip")
        return

    # convert label to Pascal VOC format
    for category_name, paths in labels.items():
        if label_format == 'WKT':
            xml_writer = _add_pascal_object_from_wkt(xml_writer,
                                                     wkt_data=paths,
                                                     label=category_name)
        elif label_format == 'XY':
            xml_writer = _add_pascal_object_from_xy(xml_writer,
                                                    polygons=paths,
                                                    label=category_name)
        else:
            exc = UnknownFormatError(label_format=label_format)
            logging.exception(exc.message)
            raise exc

    # write Pascal VOC xml annotation for image
    xml_writer.save(
        os.path.join(annotations_output_dir, '{}.xml'.format(label_id)))
Code example #8
def dump(file_object, annotations):
    from pascal_voc_writer import Writer
    import os
    from zipfile import ZipFile
    from tempfile import TemporaryDirectory

    with TemporaryDirectory() as out_dir:
        with ZipFile(file_object, 'w') as output_zip:
            for frame_annotation in annotations.group_by_frame():
                image_name = frame_annotation.name
                width = frame_annotation.width
                height = frame_annotation.height

                writer = Writer(image_name, width, height)
                writer.template_parameters['path'] = ''
                writer.template_parameters['folder'] = ''

                for shape in frame_annotation.labeled_shapes:
                    if shape.type != "rectangle":
                        continue
                    label = shape.label
                    xtl = shape.points[0]
                    ytl = shape.points[1]
                    xbr = shape.points[2]
                    ybr = shape.points[3]
                    writer.addObject(label, xtl, ytl, xbr, ybr)

                anno_name = os.path.basename('{}.{}'.format(
                    os.path.splitext(image_name)[0], 'xml'))
                anno_file = os.path.join(out_dir, anno_name)
                writer.save(anno_file)
                output_zip.write(filename=anno_file, arcname=anno_name)
Code example #9
def label_video_frames(video_name):
    video_reader = cv2.VideoCapture('../video/{}.mp4'.format(video_name))

    number_of_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

    for i in range(number_of_frames):
        success, frame = video_reader.read()
        if success and i % 12 == 0:
            cv2.imshow("current_frame", frame)
            key = cv2.waitKey(0)

            image_file = '../train/{}-{}'.format(video_name, i)
            absolute_path = os.path.abspath('{}.jpg'.format(image_file))

            writer = Writer(absolute_path, frame_w, frame_h)

            not_start_scene = True
            if key == ord('s'):
                not_start_scene = False
                writer.addObject('scene_start', 0, 0, frame_w, frame_h)
            elif key == ord('q'):
                break

            if not_start_scene:
                writer.addObject('not_scene_start', 0, 0, frame_w, frame_h)

            cv2.imwrite('{}.jpg'.format(image_file), frame)
            writer.save('{}.xml'.format(image_file))
Code example #10
    def generate_card(self, list_image_paths, vertical_card=False):

        image_name = str(uuid.uuid4())

        image_save_path = os.path.join(self.__log_dir, image_name + '.png')

        xml_path = os.path.join(self.__log_xml_dir, image_name + '.xml')

        background_color = random.choice(self.__background_options)

        card_image = self.__image_provider.get_background(
            background_color=background_color, vertical_card=vertical_card)

        width, height = card_image.size

        writer = Writer(image_save_path, width, height)

        list_images = self.get_images_by_color(list_image_paths,
                                               background_color)

        card_image = self.paste_image(card_image=card_image,
                                      list_images=list_images,
                                      writer_xml=writer,
                                      xml_path=xml_path)

        card_image.convert('RGBA').save(image_save_path, 'PNG')
Code example #11
def main(args):

    caffe.set_mode_cpu()
    model_def = args.model_def
    model_weights = args.model_weights

    net = caffe.Net(
        model_def,  # defines the structure of the model
        model_weights,  # contains the trained weights
        caffe.TEST)  # use test mode (e.g., don't perform dropout)

    mu = np.array([1.751, 1.983, 2.10])
    # create transformer for the input called 'data'
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})

    transformer.set_transpose(
        'data', (2, 0, 1))  # move image channels to outermost dimension
    transformer.set_mean('data',
                         mu)  # subtract the dataset-mean value in each channel
    transformer.set_raw_scale('data', 4.335)  # rescale from [0, 1] to [0, 255]
    transformer.set_channel_swap('data',
                                 (2, 1, 0))  # swap channels from RGB to BGR

    net.blobs['data'].reshape(
        1,  # batch size
        3,  # 3-channel (BGR) images
        args.image_resize,
        args.image_resize)  # square input of side args.image_resize

    filenames = os.listdir(args.image_dir)
    images = filter(is_imag, filenames)
    for image in images:
        pic = args.image_dir + image
        input_image = caffe.io.load_image(pic)
        image_show = cv2.imread(pic)
        result, result2 = det(input_image, transformer, net)
        vis_detections(image_show, result, result2)
        if args.write_voc:
            writer = Writer(pic, input_image.shape[1], input_image.shape[0])
            write_detections(image_show, result, writer)
            base = os.path.splitext(pic)[0]
            writer.save(base + ".xml")
        else:
            print('')
            cv2.imshow("Image", image_show)
            cv2.waitKey(1000)
Code example #12
def preds_to_voc_format(img_name, preds, classes_selected, class_names=[], thresh=0.5):
    if (len(classes_selected) == 1 and classes_selected[0] == ""):
        classes_selected = class_names
    img = cv2.imread(img_name)
    h, w, c = img.shape
    writer = Writer(img_name, w, h)
    i = 0
    for j in range(len(preds[i]['rois'])):
        # np.int was removed from recent NumPy releases; plain int is equivalent here
        x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
        class_name = obj_list[preds[i]['class_ids'][j]]
        score = float(preds[i]['scores'][j])

        if (score >= thresh and class_name in classes_selected):
            writer.addObject(class_name, x1, y1, x2, y2)
    return writer
Code example #13
def WriteNewXMLfile(pathxml, path_img, width, height, list_bd, size_min=225):
    writer = Writer(path_img, width, height)
    for element in list_bd:
        classe_elt, xmin, ymin, xmax, ymax = element
        area = (xmax - xmin) * (ymax - ymin)
        if area <= size_min:
            # Marked as difficult
            writer.addObject(classe_elt, xmin, ymin, xmax, ymax, difficult=1)
        else:
            writer.addObject(classe_elt, xmin, ymin, xmax, ymax)
    writer.save(annotation_path=pathxml)
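A quick usage sketch showing how the size_min threshold marks small boxes as difficult (the paths and boxes below are made up):

# Hypothetical call: each entry is (class, xmin, ymin, xmax, ymax).
boxes = [("person", 10, 10, 24, 24),     # area 196 <= 225, written with difficult=1
         ("car", 50, 60, 200, 180)]      # larger box, written as a normal object
WriteNewXMLfile("out/img_0001.xml", "img_0001.jpg", 640, 480, boxes)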
Code example #14
def preds_to_voc_format(img_name, bounding_boxes, class_IDs, scores, class_names=[], thresh=0.5):
    img = cv2.imread(img_name)
    h, w, c = img.shape
    writer = Writer(img_name, w, h)
    for i in range(len(class_IDs[0])):
        class_id = int(class_IDs[0][i][0].asnumpy())
        score = float(scores[0][i][0].asnumpy())
        bbox = bounding_boxes[0][i].asnumpy()
        x1 = int(bbox[0])
        y1 = int(bbox[1])
        x2 = int(bbox[2])
        y2 = int(bbox[3])
        if (class_id != -1 and score >= thresh):
            class_name = class_names[class_id]
            writer.addObject(class_name, x1, y1, x2, y2)
    return writer
Code example #15
File: base.py  Project: mengfu188/analyze_data
def export_single_voc_from_json(ann_file, img_info):
    """
    Export a single image's annotations as a Pascal VOC XML file.
    :param ann_file: file name for export
    :param img_info: image information for export
    :return:
    """

    writer = Writer(img_info['filepath'], img_info['width'], img_info['height'], img_info.get('depth', 3))
    ann_info = img_info['ann']

    def addObject(bboxes, labels, difficult=0):
        for bbox, label in zip(bboxes, labels):
            writer.addObject(label, bbox[0], bbox[1], bbox[2], bbox[3], difficult=difficult)

    addObject(ann_info['bboxes'], ann_info['labels'])

    if ann_info.get('bboxes_ignore') is not None:
        addObject(ann_info['bboxes_ignore'], ann_info['labels_ignore'], 1)

    writer.save(ann_file)
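The img_info layout is not documented in the snippet; the sketch below shows the structure the function appears to read (values are illustrative only):

# Hypothetical record, inferred from the keys accessed above.
img_info = {
    'filepath': 'images/0001.jpg',
    'width': 1280,
    'height': 720,
    'depth': 3,
    'ann': {
        'bboxes': [[10, 20, 110, 220]],
        'labels': ['person'],
        'bboxes_ignore': [],   # optional; written with difficult=1 when present
        'labels_ignore': [],
    },
}
export_single_voc_from_json('annotations/0001.xml', img_info)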
Code example #16
def coco_to_voc(ann_file, output_dir=tempfile.gettempdir()):
    """arguments :

    ann_file : path to the annotations file (.json file)
    output_dir : path where the converted files are to be stored
    """

    coco = COCO(ann_file)
    cats = coco.loadCats(coco.getCatIds())
    cat_idx = {}
    for c in cats:
        cat_idx[c['id']] = c['name']
    for img in coco.imgs:
        catIds = coco.getCatIds()
        annIds = coco.getAnnIds(imgIds=[img], catIds=catIds)
        if len(annIds) > 0:
            img_fname = coco.imgs[img]['file_name']
            image_fname_ls = img_fname.split('.')
            image_fname_ls[-1] = 'xml'
            label_fname = '.'.join(image_fname_ls)
            writer = Writer(img_fname, coco.imgs[img]['width'],
                            coco.imgs[img]['height'])
            anns = coco.loadAnns(annIds)
            for a in anns:
                bbox = a['bbox']
                bbox = [bbox[0], bbox[1], bbox[2] + bbox[0], bbox[3] + bbox[1]]
                bbox = [str(b) for b in bbox]
                catname = cat_idx[a['category_id']]
                writer.addObject(catname, bbox[0], bbox[1], bbox[2], bbox[3])
            # save once per image, after all of its annotations have been added
            writer.save(output_dir + '/' + label_fname)

    return output_dir
Code example #17
def detect_objects(img_path):
    image = Image.open(img_path)
    image_np = load_image_into_numpy_array(image)
    image_np_expanded = np.expand_dims(image_np, axis=0)

    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: image_np_expanded})

    c = np.squeeze(classes).astype(np.int32)
    s = np.squeeze(scores)
    b = np.squeeze(boxes)
    file_name = img_path.split('/')[-1]
    c_name = file_name.split('-')[0]
    for i in range(0, len(c)):
        if c[i] in CATEGORY_INDEX.keys():
            class_name = CATEGORY_INDEX[c[i]]['name']
            if class_name == c_name:
                if s is not None and s[i] >= MINIMUM_CONFIDENCE:
                    out_dir = PATH_TO_OUTPUT + "/" + c_name + "/"
                    box = tuple(b[i].tolist())
                    width, height = image.size
                    ymin, xmin, ymax, xmax = box
                    ymin *= height
                    ymax *= height
                    xmin *= width
                    xmax *= width
                    if max(ymin, xmin, ymax, xmax) > 0:
                        image = Image.fromarray(image_np.astype(np.uint8))
                        fn = file_name.replace('.jpg', '')
                        writer = Writer(out_dir + file_name, width, height)
                        writer.addObject(class_name, int(xmin), int(ymin),
                                         int(xmax), int(ymax))
                        writer.save(out_dir + fn + '.xml')
Code example #18
File: bbox_utils.py  Project: WLChopSticks/polyp_seg
def pascal_xml_writer(imgname, cv2image, corrds, output_dir):
    h, w, _ = cv2image.shape
    writer = Writer(imgname, w, h)
    for corrd in corrds:
        writer.addObject('polyp', corrd[0], corrd[1], corrd[2], corrd[3])
    writer.save(os.path.join(output_dir, imgname.split('.')[0] + '.xml'))
    return
Code example #19
def generate_one_aug(image, img_name, bbs, source_root, i, seq):
    sp = img_name.split('.')

    img_outfile = '%s/%s-%02d.%s' % (FLAGS.image_output_dir, sp[0], i, sp[-1])
    xml_outfile = '%s/%s-%02d.xml' % (FLAGS.annot_output_dir, sp[0], i)

    if os.path.exists(img_outfile) and os.path.exists(xml_outfile):
        print("%s exists" % img_outfile)
        return
    else:
        print("Generating %s" % img_outfile)

    seq_det = seq.to_deterministic()
    image_aug = seq_det.augment_images([image])[0]
    bbs_aug = seq_det.augment_bounding_boxes(
        [bbs])[0].remove_out_of_image().clip_out_of_image()

    writer = Writer(img_outfile,
                    int(float(source_root.find('size').find('width').text)),
                    int(float(source_root.find('size').find('height').text)))
    for bb in bbs_aug.bounding_boxes:
        writer.addObject(bb.label, int(bb.x1), int(bb.y1), int(bb.x2),
                         int(bb.y2))

    cv2.imwrite(img_outfile, image_aug)
    writer.save(xml_outfile)
Code example #20
def new_xml(xml_file, new_f, save_dir, args):
    #parse the xmlfile
    tree = ET.parse(xml_file)
    root = tree.getroot()
    size = get_and_check(root, "size", 1)
    width = int(float(get_and_check(size, "width", 1).text))   # get original width, height
    height = int(float(get_and_check(size, "height", 1).text))
    if args.one_side:
        new_width, new_height = new_dims(width, height, common_size=args.one_side)
    elif args.target_size:
        new_width, new_height = args.target_size
    else:
        new_width, new_height = (width, height)
    writer = Writer(new_f, new_width, new_height)  #initialize new annotation writer
    for obj in get(root, "object"):                    #for each object
        label = get_and_check(obj, "name", 1).text    #get the label
        bndbox = get_and_check(obj, "bndbox", 1)
        xmin = int(float(get_and_check(bndbox, "xmin", 1).text)) 
        ymin = int(float(get_and_check(bndbox, "ymin", 1).text))    #get the original xmin, ymin, xmax, ymax
        xmax = int(float(get_and_check(bndbox, "xmax", 1).text))
        ymax = int(float(get_and_check(bndbox, "ymax", 1).text))   
        # resize the bboxes
        if args.target_size or args.one_side:  #correct the coords if we resize the image
            xmin, ymin, xmax, ymax = correct_coords(xmin, ymin, xmax, ymax, width, height, new_width, new_height)
        #make sure the bboxes are all good
        assert xmax > xmin
        assert ymax > ymin
        writer.addObject(label, xmin, ymin, xmax, ymax)   #add this object (box) to our new xml
    
    #saves the new xml in the new save directory
    base = os.path.basename(xml_file)
    writer.save(os.path.join(save_dir, base))
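correct_coords is defined elsewhere in that project; below is a plausible sketch of the linear rescaling it would have to perform (an assumption, not the project's actual helper):

def correct_coords(xmin, ymin, xmax, ymax, width, height, new_width, new_height):
    # Hypothetical implementation: scale box corners by the same factors as the image.
    x_scale = new_width / width
    y_scale = new_height / height
    return (int(xmin * x_scale), int(ymin * y_scale),
            int(xmax * x_scale), int(ymax * y_scale))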
Code example #21
def generate_pascal_xml(boxes,
                        classes,
                        scores,
                        category_index,
                        input_image_path,
                        output_xml,
                        min_score_thresh=.5):
    from pascal_voc_writer import Writer
    # filter by score
    image = Image.open(input_image_path)
    width, height = image.size
    writer = Writer(input_image_path, width, height)
    for object_index in range(len(scores)):
        # filter out low-scoring detections
        if scores[object_index] < min_score_thresh:
            continue
        # write objects
        class_name = str(category_index[classes[object_index]]["name"])
        box = boxes[object_index]
        ymin = int(min(box[0], box[2]) * height)
        xmin = int(min(box[1], box[3]) * width)
        ymax = int(max(box[0], box[2]) * height)
        xmax = int(max(box[1], box[3]) * width)

        writer.addObject(class_name, xmin, ymin, xmax, ymax)

    print("We are going to save pascal xml in {}".format(output_xml))
    writer.save(output_xml)
Code example #22
def save_images_2(parsed_data, debug=False):
    n = 0
    print("Creating dataset please wait...")
    parsed_data = pickle.loads(
        open("dataset/data/parsed_data.pickle", "rb").read())
    for data in tqdm(parsed_data):
        img_path = "dataset/images/"
        writer = Writer(img_path + str(n) + '.jpg', 256, 256)
        for d in data[1]:
            points = d['points']
            width = height = 256
            if 'Face' in d['label']:

                x1 = round(width * points[0]['x'])
                y1 = round(height * points[0]['y'])
                x2 = round(width * points[1]['x'])
                y2 = round(height * points[1]['y'])
                """
                x1 = points[0]['x']
                y1 = points[0]['y']
                x2 = points[1]['x']
                y2 = points[1]['y']
                """
                # ::addObject(name, xmin, ymin, xmax, ymax)
                writer.addObject('face', x1, y1, x2, y2)
        writer.save('dataset/images/data_VOC_pascal' + str(n) + '.xml')
        cv2.imwrite(img_path + str(n) + ".jpg",
                    cv2.resize(data[0], (256, 256)))
        n += 1
Code example #23
    def extract(self, path):
        path = Path(path)
        im = cv2.imread(str(path))
        objects = parse_rec(self.bnb_path.joinpath(f'{path.stem}.xml'))
        scale_lower = 1 if len(self.angles) == 1 else 0.8

        for bbid, ebbox in enumerate(self.extract_bboxes):
            center = self._compute_center_by_bbox(ebbox)

            for aid, angle in enumerate(self.angles):
                scale = np.random.uniform(scale_lower, 1.0)
                M = cv2.getRotationMatrix2D(center, angle, scale)
                rotated_im = cv2.warpAffine(im, M, (im.shape[1], im.shape[0]))
                rotated_ob = self._affine_objects(M, objects)

                sub_im_fn = f'{path.stem}_{bbid}_{aid}'
                sub_im_path = self.out_im.joinpath(f'{sub_im_fn}.jpg')
                sub_im_orgn, sub_im_visual, sub_objects = subbbox_extract(
                    rotated_im, rotated_ob, ebbox)
                self.all_filename.append(sub_im_fn)

                cv2.imwrite(str(sub_im_path), sub_im_orgn)
                cv2.imwrite(str(self.out_vs.joinpath(f'{sub_im_fn}.jpg')),
                            sub_im_visual)

                writer = Writer(str(sub_im_path), sub_im_orgn.shape[1],
                                sub_im_orgn.shape[0])
                for obj in sub_objects:
                    writer.addObject(obj['name'], *obj['bbox'])
                writer.save(str(self.out_bb.joinpath(f'{sub_im_fn}.xml')))
Code example #24
File: main.py  Project: jcpayne/image_bbox_tiler
    def __resize_bboxes(self, new_size, resize_factor):
        """Private Method
        """
        for xml_file in sorted(glob.glob(self.ANN_SRC + '/*.xml')):
            root, objects = extract_from_xml(xml_file)
            im_w, im_h = int(root.find('size')[0].text), int(
                root.find('size')[1].text)
            im_filename = root.find('filename').text.split('.')[0]
            an_filename = xml_file.split('/')[-1].split('.')[0]
            if resize_factor is None:
                w_scale, h_scale = new_size[0] / im_w, new_size[1] / im_h
            else:
                w_scale, h_scale = resize_factor, resize_factor
                new_size = [0, 0]
                new_size[0], new_size[1] = int(im_w * w_scale), int(im_h *
                                                                    h_scale)
                new_size = tuple(new_size)

            voc_writer = Writer('{}'.format(im_filename), new_size[0],
                                new_size[1])

            for obj in objects:
                obj_lbl = list(obj[-4:])
                obj_lbl[0] = int(obj_lbl[0] * w_scale)
                obj_lbl[1] = int(obj_lbl[1] * h_scale)
                obj_lbl[2] = int(obj_lbl[2] * w_scale)
                obj_lbl[3] = int(obj_lbl[3] * h_scale)

                voc_writer.addObject(obj[0], obj_lbl[0], obj_lbl[1],
                                     obj_lbl[2], obj_lbl[3], obj[1], obj[2],
                                     obj[3])
            voc_writer.save('{}/{}.xml'.format(self.ANN_DST, an_filename))
Code example #25
def coco2voc(ann_file, output_dir):
    coco = COCO(ann_file)
    cats = coco.loadCats(coco.getCatIds())
    cat_idx = {}
    for c in cats:
        cat_idx[c['id']] = c['name']
    txtfile = open(output_dir[:-12] + 'ImageSets/Main/trainval.txt', mode='w+')
    for img in coco.imgs:
        catIds = coco.getCatIds()
        annIds = coco.getAnnIds(imgIds=[img], catIds=catIds)
        if len(annIds) > 0:
            img_fname = coco.imgs[img]['file_name']
            image_fname_ls = img_fname.split('.')
            txtfile.write(image_fname_ls[0] + '\n')
            image_fname_ls[-1] = 'xml'
            label_fname = '.'.join(image_fname_ls)
            writer = Writer(img_fname, coco.imgs[img]['width'],
                            coco.imgs[img]['height'])
            anns = coco.loadAnns(annIds)
            for a in anns:
                bbox = a['bbox']
                bbox = [bbox[0], bbox[1], bbox[2] + bbox[0], bbox[3] + bbox[1]]
                bbox = [str(b) for b in bbox]
                catname = cat_idx[a['category_id']]
                writer.addObject(catname, bbox[0], bbox[1], bbox[2], bbox[3])
            # save once per image, after all of its annotations have been added
            writer.save(output_dir + '/' + label_fname)
    txtfile.close()
Code example #26
    def augment(self):
        """augment all images with augmentor and save augmented images and XML files
        """
        for i in range(len(self.aug)):
            self.aug[i] = self.aug[i].to_deterministic()

        images_aug = []
        bbs_aug = []
        for i in range(len(self.aug)):
            print("augmentor:", i)
            images_aug = self.aug[i].augment_images(self.images)
            bbs_aug = self.aug[i].augment_bounding_boxes(self.bbs)
            for j in range(len(bbs_aug)):
                bbs_aug[j] = bbs_aug[j].remove_out_of_image().cut_out_of_image()

            for k in range(len(images_aug)):
                writer = Writer(self.images_path[k], images_aug[k].shape[1],
                                images_aug[k].shape[0])
                for l in range(len(bbs_aug[k].bounding_boxes)):
                    after = bbs_aug[k].bounding_boxes[l]
                    writer.addObject(after.label, int(after.x1), int(after.y1),
                                     int(after.x2), int(after.y2))
                xml_path = self.images_path[k].replace("JPEGImages",
                                                       "Augmented_dataset")
                xml_path = xml_path.rsplit('.', 1)[0]
                if i == 0 and (not os.path.isdir(xml_path)):
                    os.makedirs(xml_path)
                xml_file_name = os.path.basename(self.images_path[k]).rsplit(
                    ".", 1)[0] + "_" + str(i + 3) + "_aug.xml"
                xml_file = os.path.join(xml_path, xml_file_name)
                jpeg_file = xml_file.rsplit(
                    ".", 1)[0] + "." + self.images_path[k].rsplit(".", 1)[1]
                writer.save(xml_file)
                cv2.imwrite(jpeg_file, images_aug[k])
Code example #27
def augment(annotation):
    seq = sequence.get()

    for i in range(AUGMENT_SIZE):
        filename = annotation['filename']
        sp = filename.split('.')
        outfile = '%s/%s-%02d.%s' % (OUTPUT_DIR, sp[0], i, sp[-1])

        seq_det = seq.to_deterministic()

        image = cv2.imread('%s/%s' % (INPUT_DIR, annotation['filename']))
        _bbs = []
        for obj in annotation['objects']:
            bb = ia.BoundingBox(x1=int(obj['xmin']),
                                y1=int(obj['ymin']),
                                x2=int(obj['xmax']),
                                y2=int(obj['ymax']),
                                label=obj['name'])
            _bbs.append(bb)

        bbs = ia.BoundingBoxesOnImage(_bbs, shape=image.shape)

        image_aug = seq_det.augment_images([image])[0]
        bbs_aug = seq_det.augment_bounding_boxes(
            [bbs])[0].remove_out_of_image().cut_out_of_image()

        writer = Writer(outfile, annotation['size']['width'],
                        annotation['size']['height'])
        for bb in bbs_aug.bounding_boxes:
            writer.addObject(bb.label, int(bb.x1), int(bb.y1), int(bb.x2),
                             int(bb.y2))

        cv2.imwrite(outfile, image_aug)
        writer.save('%s.xml' % outfile.split('.')[0])
Code example #28
def dump(file_object, annotations):
    from pascal_voc_writer import Writer
    import os
    from zipfile import ZipFile
    from tempfile import TemporaryDirectory

    with TemporaryDirectory() as out_dir:
        with ZipFile(file_object, 'w') as output_zip:
            for frame_annotation in annotations.group_by_frame():
                image_name = frame_annotation.name
                width = frame_annotation.width
                height = frame_annotation.height

                writer = Writer(image_name, width, height)
                writer.template_parameters['path'] = ''
                writer.template_parameters['folder'] = ''

                for shape in frame_annotation.labeled_shapes:
                    if shape.type != "rectangle":
                        continue

                    label = shape.label
                    xtl = shape.points[0]
                    ytl = shape.points[1]
                    xbr = shape.points[2]
                    ybr = shape.points[3]

                    difficult = 0
                    truncated = 0
                    for attribute in shape.attributes:
                        if attribute.name == 'truncated' and 'true' == attribute.value.lower():
                            truncated = 1
                        elif attribute.name == 'difficult' and 'true' == attribute.value.lower():
                            difficult = 1

                    writer.addObject(
                        name=label,
                        xmin=xtl,
                        ymin=ytl,
                        xmax=xbr,
                        ymax=ybr,
                        truncated=truncated,
                        difficult=difficult,
                    )

                anno_name = os.path.basename('{}.{}'.format(os.path.splitext(image_name)[0], 'xml'))
                anno_file = os.path.join(out_dir, anno_name)
                writer.save(anno_file)
                output_zip.write(filename=anno_file, arcname=anno_name)
Code example #29
def create_xml(polygons, width, height, path_to_save):
    writer = Writer('', width, height)

    for polygon in polygons:
        writer.addObject(polygon[0], int(polygon[2]), int(polygon[3]),
                         int(polygon[4]), int(polygon[5]))

    writer.save(path_to_save)
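The polygon layout is implicit in the indexing above; a hedged sketch of a matching call (label at index 0, box corners at indices 2-5, index 1 unused here):

# Hypothetical input row: (label, score_or_unused, xmin, ymin, xmax, ymax)
polygons = [("table", 0.97, 100, 150, 400, 380)]
create_xml(polygons, width=1920, height=1080, path_to_save="page_01.xml")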
Code example #30
File: process.py  Project: hadarohana/myCosmos
def write_matches(vote_df, out_path):
    vote_df["winner"] = vote_df.apply(get_winner, axis=1)
    # TODO if this matters we need to fill it in
    writer = Writer("n/a", 1920, 1920)
    for row in vote_df.itertuples():
        writer.addObject(row.winner, int(row.x1), int(row.y1), int(row.x2),
                         int(row.y2))
    writer.save(out_path)