Example No. 1
def object2string(filepath, obj, label_table=None):
    an_datum = pb.AnnotatedDatum()
    an_datum.type = pb.AnnotatedDatum.BBOX
    h, w = read_file_datum(filepath, an_datum.datum)
    group = {}
    for box in obj:
        #cls = box['name']
        if label_table is not None:
            tm_name = box['label']
            if not (is_number(tm_name) or is_char(tm_name)):
                tm_name = '*'
            if tm_name not in label_table:
                label_table[tm_name] = len(label_table) + 1
            cls = label_table[tm_name]
        else:
            cls = int(box['name']) + 1
        if cls not in group:
            group[cls] = []
        group[cls].append(box)
    for k, v in group.items():
        g = an_datum.annotation_group.add()
        g.group_label = int(k)
        for i in range(len(v)):
            instance = g.annotation.add()
            instance.instance_id = i
            bbox = instance.bbox
            bbox.xmin = float(v[i]['xmin']) / w
            bbox.ymin = float(v[i]['ymin']) / h
            bbox.xmax = float(v[i]['xmax']) / w
            bbox.ymax = float(v[i]['ymax']) / h
            bbox.difficult = (int(v[i]['difficult']) != 0)
            bbox.angle = float(ssd_util.degree2angle(v[i]['degree'], w, h))
    return an_datum.SerializeToString()
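A minimal write-side sketch of how the serialized string could be stored in an LMDB, which is what the read-side examples below expect. The output path, the samples list of (image path, box list) pairs, and the key format are illustrative assumptions, not part of the original example:

import lmdb

label_table = {}
env = lmdb.open('./train_lmdb', map_size=1 << 30)  # hypothetical output path and map size
with env.begin(write=True) as txn:
    # samples is a hypothetical iterable of (image_path, boxes) pairs
    for idx, (image_path, boxes) in enumerate(samples):
        value = object2string(image_path, boxes, label_table)
        key = '%08d_%s' % (idx, image_path)  # zero-padded key, as later read examples assume
        txn.put(key.encode(), value)
env.close()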
Example No. 2
def process(args):
    '''Run detection over an image list or an LMDB and pickle the results.'''
    outsavedir = args.output_dir

    detection = CaffeDetection(args.gpu, args.prototxt, args.model, args.size,
                               args.labelmap)
    pred_conf_list = []
    image_label_list = []
    labelmaplist = get_labelmaplist(detection.labelmap)

    if args.lmdb is None:
        evalfile = args.image_list
        filelist = np.array(pd.read_csv(evalfile, sep='\n',
                                        header=None)).tolist()
        for i, item in enumerate(tqdm(filelist)):
            item_jpg = os.path.join(args.imgdir_add, item[0].split(' ')[0])
            #         print("dealing img:{}".format(item_jpg))
            result = detection.detect(item_jpg)
            image_label_list.append(item[0])
            pred_conf_list.append(result)
    else:
        import lmdb
        import cv2
        lmdb_env = lmdb.open(args.lmdb, readonly=True)
        with lmdb_env.begin() as lmdb_txn:
            lmdb_cursor = lmdb_txn.cursor()
            datum = caffe_pb2.AnnotatedDatum()

            for key, value in tqdm(lmdb_cursor):

                datum.ParseFromString(value)
                # decode the JPEG bytes stored in the inner Datum
                data_array = np.asarray(bytearray(datum.datum.data),
                                        dtype="uint8")
                img_array = cv2.imdecode(data_array, cv2.IMREAD_COLOR)
                img_array = img_array / 255.0
                result = detection.detect(img_array)
                # keys are bytes; drop the zero-padded index prefix
                # (lstrip would strip a character set, not a prefix)
                imageitem = key.decode().split('_', 1)[-1]
                image_label_list.append(imageitem)
                pred_conf_list.append(result)


        lmdb_env.close()

    if not os.path.exists(outsavedir):
        os.mkdir(outsavedir)
    with open(outsavedir + '/ssdpredfile.npz', 'wb') as scoreoutfile:
        pickle.dump(labelmaplist, scoreoutfile, True)
        pickle.dump(image_label_list, scoreoutfile, True)
        pickle.dump(pred_conf_list, scoreoutfile, True)
    print("data saved")
Example No. 3
def open_lmdb(lmdb_path):

    lmdb_env = lmdb.open(lmdb_path)

    lmdb_txn = lmdb_env.begin()  # transaction handle
    lmdb_cursor = lmdb_txn.cursor()  # cursor over the records
    annotated_datum = caffe_pb2.AnnotatedDatum()  # reusable AnnotatedDatum message

    return lmdb_cursor, annotated_datum
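A short consumer sketch for the handles returned by open_lmdb (the LMDB path is a placeholder):

lmdb_cursor, annotated_datum = open_lmdb('/path/to/lmdb')  # placeholder path
for key, value in lmdb_cursor:
    annotated_datum.ParseFromString(value)
    datum = annotated_datum.datum
    print(key, datum.width, datum.height, len(annotated_datum.annotation_group))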
Example No. 4
def datum_read():
    import lmdb
    import numpy as np
    import cv2
    from caffe.proto import caffe_pb2
    # /home/liuky/HDD_1/soft/caffe-ssd/examples/VOC0712/VOC0712_trainval_lmdb
    # /home/liuky/HDD_1/data/tmp_lmdb/smoke_data_lmdb_300
    lmdb_env = lmdb.open('/home/liuky/HDD_1/data/tmp_lmdb/test_data')

    lmdb_txn = lmdb_env.begin()  # transaction handle
    lmdb_cursor = lmdb_txn.cursor()  # cursor over the records
    annotated_datum = caffe_pb2.AnnotatedDatum()  # reusable AnnotatedDatum message

    for idx, (key, value) in enumerate(lmdb_cursor):
        # print(idx)
        # continue

        annotated_datum.ParseFromString(value)
        datum = annotated_datum.datum  # Datum message holding the encoded image
        grps = annotated_datum.annotation_group  # one AnnotationGroup per label class; each holds one annotation per bounding box
        anno_type = annotated_datum.type
        for grp in grps:
            label = grp.group_label
            for annotation in grp.annotation:
                instance_id = annotation.instance_id
                xmin = annotation.bbox.xmin * datum.width  # Annotation message; bbox values are normalized to [0, 1]
                ymin = annotation.bbox.ymin * datum.height
                xmax = annotation.bbox.xmax * datum.width
                ymax = annotation.bbox.ymax * datum.height

        # The Datum's label and its three dimensions
        _ = datum.label  # meaningless for detection datasets; the real labels are the group_label values above
        channels = datum.channels
        height = datum.height
        width = datum.width
        image_x = np.frombuffer(datum.data, dtype=np.uint8)  # raw bytes to array (np.fromstring is deprecated)

        if channels == 3:
            image = cv2.imdecode(image_x, -1)  # decode
            cv2.imwrite('/home/liuky/HDD_1/tmp.jpg', image)
        else:
            img_datas = []
            for channel_idx in range(channels):
                start_idx = channel_idx * height * width
                single_channel = image_x[start_idx:(start_idx +
                                                    height * width)]
                single_channel = np.reshape(single_channel, (height, width))
                img_datas.append(single_channel[:, :, np.newaxis])
            image = np.concatenate(img_datas, -1)
            tmp_img = image[:, :, [0]]
            cv2.imwrite('/home/liuky/HDD_1/tmp.jpg',
                        cv2.resize(tmp_img, (1280, 720)))

        print('')
Example No. 5
def ReadLmdb(lmdb_path):
    env = lmdb.open(lmdb_path)
    txn = env.begin()
    anno_datum = caffe_pb2.AnnotatedDatum()
    for key, val in txn.cursor():
        anno_datum.ParseFromString(val)
        data = anno_datum.datum.data
        print(key, anno_datum)
        np_data = np.frombuffer(data, dtype=np.uint8)
        cv_img = cv2.imdecode(np_data, -1)
        cv2.imshow('test', cv_img)
        cv2.waitKey(0)
    env.close()
    return
Example No. 6
def anno2datum(img, bboxes):
    if len(bboxes) == 0:
        return
    annotated_datum = caffe_pb2.AnnotatedDatum()
    annotated_datum.type = annotated_datum.BBOX
    datum = annotated_datum.datum
    datum.channels = img.shape[2]
    datum.width = img.shape[1]
    datum.height = img.shape[0]
    datum.encoded = True
    datum.label = -1
    datum.data = cv2.imencode('.jpg', img)[1].tobytes()
    groups = annotated_datum.annotation_group
    for bbox in bboxes:
        found_group = False
        instance_id = 0
        label = int(bbox[4]) + 1  # background is 0
        for group in groups:
            if group.group_label == label:
                # next free instance id within this label's group
                instance_id = len(group.annotation)
                found_group = True
                annotation = group.annotation.add()
                break
        if not found_group:
            group = groups.add()
            instance_id = 0
            group.group_label = label
            annotation = group.annotation.add()
        annotation.instance_id = instance_id
        annotation.bbox.xmin = bbox[0] * 1.0 / img.shape[1]
        annotation.bbox.ymin = bbox[1] * 1.0 / img.shape[0]
        annotation.bbox.xmax = bbox[2] * 1.0 / img.shape[1]
        annotation.bbox.ymax = bbox[3] * 1.0 / img.shape[0]
    return annotated_datum
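One way the returned message could be written out, mirroring the zero-padded key convention seen in the other examples; img_files, boxes_for() and the output path are hypothetical placeholders:

import lmdb

env = lmdb.open('./anno_lmdb', map_size=1 << 30)  # hypothetical output path
with env.begin(write=True) as txn:
    # img_files and boxes_for() are hypothetical placeholders
    for idx, img_file in enumerate(img_files):
        img = cv2.imread(img_file)
        annotated_datum = anno2datum(img, boxes_for(img_file))
        if annotated_datum is None:  # anno2datum returns None when there are no boxes
            continue
        key = '%08d_%s' % (idx, img_file)
        txn.put(key.encode(), annotated_datum.SerializeToString())
env.close()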
Example No. 7
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
print(net.blobs['data'].data.shape)

idx = 1
# if os.path.exists('/home/aiserver/result/word_dect/res_img/'):
#     shutil.rmtree('/home/aiserver/result/word_dect/res_img/')
#
# os.mkdir('/home/aiserver/result/word_dect/res_img/')

lmdb_env = lmdb.open(
    '/media/aiserver/disk2/word_dect/pipline_data_v2/lmdb/pipline_data_v2_test_lmdb'
)

lmdb_txn = lmdb_env.begin()  # transaction handle
lmdb_cursor = lmdb_txn.cursor()  # cursor over the records
annotated_datum = caffe_pb2.AnnotatedDatum()  # reusable AnnotatedDatum message
#
# bei_x = 100000
# bei_y = 100000
# for bei_x in range()

bei_x = 0
bei_y = 0
idx = 0
for off_set in [2, 3, 4]:
    for bei_xx in range(0, 1):
        for bei_yy in [6, 7, 8, 9]:
            res = open(
                "---new_ptlogResNet256-lr0.001-100000-0.5-deconv-inputw900_390_re.txt",
                "a")
            analyze_scores = []
Example No. 8
import lmdb
import caffe
import cv2
from caffe.proto import caffe_pb2
import numpy as np

lmdb_file = "/home/tharun/data/ILSVRC/lmdb/DET/ILSVRC2016_trainval1_lmdb_aug"
lmdb_env = lmdb.open(lmdb_file)
lmdb_txn = lmdb_env.begin()
lmdb_cursor = lmdb_txn.cursor()
datum = caffe_pb2.AnnotatedDatum()

for key, value in lmdb_cursor:
    datum.ParseFromString(value)
    data = datum.datum
    grp = datum.annotation_group

    arr = np.frombuffer(data.data, dtype='uint8')
    img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
    width = img.shape[1]
    height = img.shape[0]
    for annotation in grp:
        for bbox in annotation.annotation:
            cv2.rectangle(
                img,
                (int(bbox.bbox.xmin * width), int(bbox.bbox.ymin * height)),
                (int(bbox.bbox.xmax * width), int(bbox.bbox.ymax * height)),
                (0, 255, 0))
    cv2.imshow('decoded image', img)
    cv2.waitKey(3000)
    break
Example No. 9
def GenerateAnnotatedDatum(img_data, label_map, objs, width, height, img_label=-1):
    '''
    :param img_data: HWC uint8 image (height, width, channels), as expected by cv2.imencode
    :param label_map: dict mapping object name to integer label
    :param objs: list of object dicts with xmin, ymin, xmax, ymax, name, difficult (see VOC XML)
    :param width: original image width, not resized
    :param height: original image height, not resized
    :param img_label: unused for detection; the real labels are the group labels below
    :return: a populated caffe_pb2.AnnotatedDatum
    '''
    if img_data.dtype != 'uint8':
        print('Image data type must be unsigned byte!')
    anno_datum = caffe_pb2.AnnotatedDatum()
    anno_datum.type = caffe_pb2.AnnotatedDatum.BBOX  # == 0
    enc_data = cv2.imencode('.jpg', img_data)
    anno_datum.datum.encoded = True
    anno_datum.datum.data = enc_data[1].tobytes()  # tostring() is deprecated
    anno_datum.datum.channels = img_data.shape[2]
    anno_datum.datum.height = img_data.shape[0]
    anno_datum.datum.width = img_data.shape[1]
    anno_datum.datum.label = img_label

    # datum = caffe.io.array_to_datum(img_data, img_label)
    # anno_datum.datum.channels = datum.channels
    # anno_datum.datum.height = datum.height
    # anno_datum.datum.width = datum.width
    # anno_datum.datum.label = datum.label
    # anno_datum.datum.data = datum.data
    # anno_datum.datum.float_data.extend(datum.float_data)
    # anno_datum.datum.encoded = True

    for obj in objs:
        group_label = label_map[obj['name']]
        found_group = False
        for anno_group in anno_datum.annotation_group:
            if group_label == anno_group.group_label:
                if len(anno_group.annotation) == 0:
                    instance_id = 0
                else:
                    instance_id = anno_group.annotation[len(anno_group.annotation) - 1].instance_id + 1
                anno = anno_group.annotation.add()
                found_group = True
        if not found_group:
            anno_group = anno_datum.annotation_group.add()
            anno_group.group_label = group_label
            anno = anno_group.annotation.add()
            instance_id = 0
        anno.instance_id = instance_id
        instance_id += 1
        bbox = anno.bbox
        if obj['xmin'] > width or obj['ymin'] > height or obj['xmax'] > width \
                or obj['ymax'] > height or obj['xmin'] < 0 or obj['ymin'] < 0 or obj['xmax'] < 0 or obj['ymax'] < 0:
            print('Bounding box exceeds image boundary.')
        if obj['xmin'] > obj['xmax'] or obj['ymin'] > obj['ymax']:
            print('Bounding box irregular.')
        bbox.xmin = float(obj['xmin']) / width
        bbox.ymin = float(obj['ymin']) / height
        bbox.xmax = float(obj['xmax']) / width
        bbox.ymax = float(obj['ymax']) / height
        bbox.difficult = bool(obj['difficult'])
    return anno_datum
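An illustrative call; the image path, label map, and object list are made-up values, and the image is HWC uint8 as the encoder expects:

img = cv2.imread('example.jpg')  # placeholder image, HWC uint8
label_map = {'cat': 1, 'dog': 2}  # hypothetical name-to-label mapping
objs = [{'xmin': 10.0, 'ymin': 20.0, 'xmax': 100.0, 'ymax': 200.0,
         'name': 'cat', 'difficult': 0}]
anno_datum = GenerateAnnotatedDatum(img, label_map, objs,
                                    width=img.shape[1], height=img.shape[0])
serialized = anno_datum.SerializeToString()  # ready to be put() into an LMDB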
Example No. 10
def lmdb2image(args, show=False, gen_anchors=True, normalized=True):
    data_dir = "data/" + args.dataset
    lmdb_dir = data_dir + "/lmdb/" + args.split + "_lmdb"
    if not os.path.exists(lmdb_dir):
        print(lmdb_dir + " does not exist")
        return
    db = lmdb.open(lmdb_dir)
    txn = db.begin()
    cursor = txn.cursor()
    annotated_datum = caffe_pb2.AnnotatedDatum()
    if gen_anchors:
        data = []
    labels = CLASSES[args.dataset]
    statics = len(labels) * [0]
    index = 0
    num_images = txn.stat()['entries']
    pbar = tqdm(range(num_images))
    for key, value in cursor:
        pbar.set_description("{}/{}".format(index, num_images))
        pbar.update(1)
        index += 1
        annotated_datum.ParseFromString(value)
        groups = annotated_datum.annotation_group
        #print(len(groups))
        if show or not normalized:
            datum = annotated_datum.datum
            img = np.frombuffer(datum.data, dtype=np.uint8)
            img = cv2.imdecode(img, -1)
            height, width, _ = img.shape
        for group in groups:
            for annotation in group.annotation:
                bbox = annotation.bbox
                if bbox.xmax - bbox.xmin <= 0 or bbox.ymax - bbox.ymin <= 0:
                    continue
                labelindex = group.group_label - 1
                label = labels[labelindex] + "_" + str(annotation.instance_id)
                statics[labelindex] += 1
                if show or not normalized:
                    x1 = int(bbox.xmin * width)
                    y1 = int(bbox.ymin * height)
                    x2 = int(bbox.xmax * width)
                    y2 = int(bbox.ymax * height)
                    cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0))
                    cv2.putText(img, label, (x1, y1), 3, 1, (0, 0, 255))
                if gen_anchors:
                    if normalized:
                        data.append(
                            [bbox.xmax - bbox.xmin, bbox.ymax - bbox.ymin])
                    else:
                        data.append([x2 - x1, y2 - y1])
        if args.savegt:
            filename = key.decode().replace("/", "_")
            cv2.imwrite("output/gt/" + filename, img)
        if show:
            cv2.putText(img, key.decode(), (0, 20), 3, 1, (0, 0, 255))
            cv2.imshow("img", img)
            cv2.waitKey()
    total = 0
    for i, st in enumerate(statics):
        total += st
        print(labels[i] + ": " + str(st))
    print("-------Total: " + str(total))
    if gen_anchors:
        from get_anchors import get_anchors
        get_anchors(data)
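A hedged invocation sketch; the dataset and split names are placeholders and assume CLASSES contains the chosen dataset key:

from argparse import Namespace

args = Namespace(dataset='VOC0712', split='trainval', savegt=False)  # placeholder values
lmdb2image(args, show=False, gen_anchors=True, normalized=True)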