Example #1
def eval_dataset():
    data = COCOData()
    data.read_data(
        wmlu.home_dir("ai/mldata/coco/annotations/instances_val2014.json"),
        image_dir=wmlu.home_dir("ai/mldata/coco/val2014"))

    return data.get_items()
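For context, a minimal consumer of this dataset. This is a sketch assuming `get_items()` yields per-image tuples in the layout used by the Mapillary/Cityscapes loops later on this page:

for x in eval_dataset():
    full_path, img_info, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = x
    print(full_path, len(category_ids))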
Example #2
def default_argument_parser():
    """
    Create a parser with some common arguments used by detectron2 users.

    Returns:
        argparse.ArgumentParser:
    """
    parser = argparse.ArgumentParser(description="Arguments")
    #parser.add_argument("--config-file", default="cascade_mask_FPN_M", metavar="FILE", help="path to config file")
    parser.add_argument("--config-file",
                        default="FCOS_M.yaml",
                        metavar="FILE",
                        help="path to config file")
    parser.add_argument(
        "--resume",
        action="store_true",
        help="whether to attempt to resume from the checkpoint directory",
    )
    parser.add_argument("--eval-only",
                        action="store_true",
                        help="perform evaluation only")
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    parser.add_argument("--log_dir",
                        default=wmlu.home_dir("ai/tmp/object_detection2_log"),
                        type=str,
                        help="path to log dir")
    parser.add_argument("--ckpt_dir",
                        default=wmlu.home_dir("ai/tmp/object_detection2"),
                        type=str,
                        help="path to ckpt dir")
    parser.add_argument("--test_data_dir",
                        default="/2_data/wj/mldata/coco/val2017",
                        type=str,
                        help="path to test data dir")
    parser.add_argument("--save_data_dir",
                        default="/2_data/wj/mldata/coco/coco_results",
                        type=str,
                        help="path to save data dir")
    '''
    datetime at which training begins;
    format: YY-MM-dd HH:MM:SS, e.g. 20-06-22 15:12:00
    '''
    parser.add_argument("--runtime",
                        default="",
                        type=str,
                        help="datetime to begin tarning.")
    parser.add_argument("--gpus",
                        nargs='+',
                        type=int,
                        help="gpus for training.")
    parser.add_argument("--restore",
                        type=str,
                        help="restore option.",
                        default="auto")  #auto, ckpt,finetune, none
    return parser
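Typical usage of the returned parser (standard argparse behavior; dashes in flag names become underscores on the namespace):

args = default_argument_parser().parse_args()
print("config file:", args.config_file)
print("log dir:", args.log_dir)
if args.eval_only:
    print("evaluation-only mode")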
Example #3
def mapillary_vistas_dataset():
    NAME2ID = {}
    ID2NAME = {}
    lid = 0

    def name_to_id(x):
        # Assign consecutive ids to label names on first sight.
        nonlocal lid
        if x in NAME2ID:
            return NAME2ID[x]
        else:
            NAME2ID[x] = lid
            ID2NAME[lid] = x
            lid += 1
            return NAME2ID[x]
    ignored_labels = [
        'manhole', 'dashed', 'other-marking', 'static', 'front', 'back',
        'solid', 'catch-basin', 'utility-pole', 'pole', 'street-light', 'direction-back', 'direction-front',
        'ambiguous', 'other', 'text', 'diagonal', 'left', 'right', 'water-valve', 'general-single', 'temporary-front',
        'wheeled-slow', 'parking-meter', 'split-left-or-straight', 'split-right-or-straight', 'zigzag',
        'give-way-row', 'ground-animal', 'phone-booth', 'give-way-single', 'garage', 'temporary-back', 'caravan', 'other-barrier'
    ]
    data = MapillaryVistasData(label_text2id=name_to_id, shuffle=False, ignored_labels=ignored_labels)
    # data.read_data("/data/mldata/qualitycontrol/rdatasv5_splited/rdatasv5")
    # data.read_data("/home/vghost/ai/mldata2/qualitycontrol/rdatav10_preproc")
    # data.read_data("/home/vghost/ai/mldata2/qualitycontrol/rdatasv10_neg_preproc")
    data.read_data(wmlu.home_dir("ai/mldata/mapillary_vistas/mapillary-vistas-dataset_public_v2.0"))
    return data.get_boxes_items()
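The `name_to_id` closure simply hands out consecutive ids the first time each label name is seen. The same mapping can be sketched with `collections.defaultdict`; this is an illustrative alternative, not part of the original code:

from collections import defaultdict

NAME2ID = defaultdict(lambda: len(NAME2ID))  # a missing key gets the next free id
ID2NAME = {}

def name_to_id(x):
    idx = NAME2ID[x]
    ID2NAME.setdefault(idx, x)
    return idx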
Example #4
def coco2017_dataset():
    data = COCOData()
    data.read_data(wmlu.home_dir("ai/mldata2/coco/annotations/instances_train2017.json"),
                   image_dir=wmlu.home_dir("ai/mldata2/coco/train2017"))

    return data.get_items()
Example #5
import wml_utils as wmlu
from object_detection2.snpe_toolkit.snpe_engine import SNPEEngine
import numpy as np

dlc_path = wmlu.home_dir("0day/test.dlc")
#snpe = SNPEEngine(dlc_path,output_layers=["shared_head/l2_normalize"])
snpe = SNPEEngine(dlc_path,
                  output_names=[
                      'shared_head/ct_regr/Conv_1/BiasAdd',
                      'shared_head/heat_ct/Conv_1/BiasAdd',
                      'shared_head/hw_regr/Conv_1/BiasAdd',
                      'shared_head/l2_normalize'
                  ],
                  output_layers=[
                      "shared_head/l2_normalize/Square",
                      "shared_head/hw_regr/Conv_1/Conv2D",
                      "shared_head/ct_regr/Conv_1/Conv2D",
                      "shared_head/heat_ct/Conv_1/Conv2D"
                  ],
                  output_shapes=[[1, 135, 240, 2], [1, 135, 240, 1],
                                 [1, 135, 240, 2], [1, 135, 240, 64]])
inp = np.random.rand(1, 540, 960, 3)  # dummy NHWC input; avoid shadowing the builtin `input`
res = snpe.forward(inp.astype(np.float32))
for x in res:
    print(x.shape)
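Assuming `forward` returns the arrays in the order the outputs were declared, the loop above should print four shapes matching `output_shapes`, e.g. (1, 135, 240, 2) for the first output.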
Example #6
import numpy as np
import PIL.Image
import wml_utils as wmlu

from pycocotools import mask
import tensorflow as tf

import iotoolkit.dataset_util as dataset_util
import iotoolkit.label_map_util as label_map_util
from iotoolkit.coco_toolkit import *

flags = tf.app.flags
flags.DEFINE_boolean(
    'include_masks', True, 'Whether to include instance segmentation masks '
    '(PNG encoded) in the result. default: True.')
flags.DEFINE_string('data_dir', wmlu.home_dir("ai/mldata/coco"),
                    'data dir.')

FLAGS = flags.FLAGS

tf.logging.set_verbosity(tf.logging.INFO)

TRAIN_SIZE_LIMIT = None
VAL_SIZE_LIMIT = None
src_file_index = 0
IMAGE_PER_RECORD = 10000


def category_id_filter(category_id):
    return True
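Here `category_id_filter` keeps every category. To convert only a subset of COCO classes, the filter could test membership in an id set instead (illustrative ids; in COCO, 1/2/3 are person/bicycle/car):

KEEP_IDS = {1, 2, 3}

def category_id_filter(category_id):
    return category_id in KEEP_IDS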
Example #7
    if writer is not None:
        writer.close()

    tf.logging.info('Finished writing')


def get_save_path(save_dir, name, fidx):
    return os.path.join(save_dir, f"{name}_{fidx}.tfrecord")


def main(_):
    data_dirs = [
        "/home/wj/ai/mldata/MOT/MOT17/train",
        "/home/wj/ai/mldata/MOT/MOT17/test",
        "/home/wj/ai/mldata/MOT/MOT15/train",
        "/home/wj/ai/mldata/MOT/MOT15/test",
        "/home/wj/ai/mldata/MOT/MOT20/train",
    ]

    train_output_path = FLAGS.output_dir
    if not tf.gfile.IsDirectory(train_output_path):
        tf.gfile.MakeDirs(train_output_path)

    _create_tf_record(data_dirs, train_output_path, name='mot_train')


if __name__ == '__main__':
    FLAGS.output_dir = wmlu.home_dir("ai/mldata/MOT/tfdata_mot_train")
    tf.app.run()
Example #8
        def text_fn(classes, scores):
            return f"{ID2NAME[classes]}"

        if len(category_ids) == 0:
            continue

        wmlu.show_dict(NAME2ID)
        odv.draw_bboxes_and_maskv2(img=img,
                                   classes=category_ids,
                                   scores=None,
                                   bboxes=boxes,
                                   masks=binary_mask,
                                   color_fn=None,
                                   text_fn=text_fn,
                                   thickness=4,
                                   show_text=True,
                                   fontScale=0.8)
        base_name = os.path.basename(full_path)
        save_path = os.path.join(save_dir, base_name)
        wmli.imwrite(save_path, img)
        i += 1
        if i >= nr:
            break


if __name__ == "__main__":
    with open("/home/wj/ai/mldata/mapillary_vistas/config_v2.0.json") as f:
        data = json.load(f)
    save_dir = wmlu.home_dir("ai/tmp/mv2")
    for d in data['labels']:
        view_data(d['name'], save_dir)
Example #9
def view_data(name, save_dir, nr=20):
    print(f"View {name}")
    raw_name = name
    names = name.split("--")
    if names[0] == "void" or "ambiguous" in raw_name:
        return
    if "road" not in raw_name:
        return
    for x in names:
        save_dir = os.path.join(save_dir, x)

    wmlu.create_empty_dir(save_dir, remove_if_exists=False)

    allowed_names = [raw_name]
    NAME2ID = {}
    ID2NAME = {}
    lid = 0

    def name_to_id(x):
        # Assign consecutive ids to label names on first sight.
        nonlocal lid
        if x in NAME2ID:
            return NAME2ID[x]
        else:
            NAME2ID[x] = lid
            ID2NAME[lid] = x
            lid += 1
            return NAME2ID[x]

    data = MapillaryVistasData(label_text2id=name_to_id,
                               shuffle=False,
                               ignored_labels=None,
                               label_map=None,
                               allowed_labels_fn=allowed_names)
    data.read_data(wmlu.home_dir("ai/mldata/mapillary_vistas"))

    i = 0
    for x in data.get_items():
        full_path, img_info, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = x
        img = wmli.imread(full_path)

        def text_fn(classes, scores):
            return f"{ID2NAME[classes]}"

        if len(category_ids) == 0:
            continue

        wmlu.show_dict(NAME2ID)
        odv.draw_bboxes_and_maskv2(img=img,
                                   classes=category_ids,
                                   scores=None,
                                   bboxes=boxes,
                                   masks=binary_mask,
                                   color_fn=None,
                                   text_fn=text_fn,
                                   thickness=4,
                                   show_text=True,
                                   fontScale=0.8)
        base_name = os.path.basename(full_path)
        save_path = os.path.join(save_dir, base_name)
        wmli.imwrite(save_path, img)
        i += 1
        if i >= nr:
            break
Example #10
# -*- coding: utf-8 -*-
import os
from .build import DATASETS_REGISTRY
from datasets_tools.pascal_voc_tf_decodev2 import get_data as voc_get_data
from datasets_tools.coco_tf_decode import get_data as coco_get_data
from datasets_tools.coco_tf_kp_decode import get_data as coco_kp_get_data
from iotoolkit.coco_toolkit import ID_TO_TEXT as coco_id_to_text
from iotoolkit.coco_toolkit import COMPRESSED_ID_TO_TEXT as coco_compressed_id_to_text
from iotoolkit.pascal_voc_data import ID_TO_TEXT as pascal_voc_id_to_text
from datasets_tools.mot_tf_decode import get_data as mot_get_data
import wml_utils as wmlu

# ==== Predefined datasets and splits for COCO ==========
dataset_root_path = wmlu.home_dir("ai/mldata")

default_category_index = None
coco_category_index = {}
pascal_voc_category_index = {}
mod_category_index = {1: "rectangle", 2: "triangle", 3: "ellipse"}
modgeo_category_index = {
    1: "NSNN",
    2: "SNN",
    3: "SUN",
    4: "SNU",
    5: "SUU",
    6: "BNN",
    7: "NBNN",
    8: "BUN",
    9: "BNU",
    10: "BUU",
}
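These category indexes are plain id-to-name lookups, so for example:

assert mod_category_index[2] == "triangle"
assert modgeo_category_index[7] == "NBNN"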
Example #11
    lid = 0

    def name_to_id(x):
        # Assign consecutive ids to label names on first sight;
        # `lid` avoids shadowing the builtin `id`.
        nonlocal lid
        if x in NAME2ID:
            return NAME2ID[x]
        else:
            NAME2ID[x] = lid
            ID2NAME[lid] = x
            lid += 1
            return NAME2ID[x]

    data = CityscapesData(label_text2id=name_to_id, shuffle=False)
    # data.read_data("/data/mldata/qualitycontrol/rdatasv5_splited/rdatasv5")
    # data.read_data("/home/vghost/ai/mldata2/qualitycontrol/rdatav10_preproc")
    # data.read_data("/home/vghost/ai/mldata2/qualitycontrol/rdatasv10_neg_preproc")
    data.read_data(wmlu.home_dir("ai/mldata/cityscapes"))

    def filter(x):
        return x in ['road', 'parking']
        #return x in ['person', 'parking']
        #return x in ['terrain']
        #return x in ['car']

    # data.read_data("/home/vghost/ai/mldata2/qualitycontrol/x")
    for x in data.get_items():
        full_path, img_info, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = x
        img = wmli.imread(full_path)

        def text_fn(classes, scores):
            return f"{ID2NAME[classes]}"
    files_data = list(enumerate(files))
    if fidx != 0:
        _files_data = []
        for fid, file_d in files_data:
            _files_data.append([fid + fidx, file_d])
        files_data = _files_data

    sys.stdout.flush()
    pool = Pool(13)
    pool.map(functools.partial(make_tfrecord, output_dir=output_dir, name=name, label_text_to_id=label_text_to_id),
             files_data)
    pool.close()
    pool.join()

    print('\nFinished converting the dataset: %d examples in total.' % len(files))


def label_text_2_id(label):
    dicts = {'a': 0, 'b': 1}
    return dicts[label]


if __name__ == "__main__":
    dataset_dir = wmlu.home_dir("ai/mldata/court_detection/train_data1/")
    output_dir = wmlu.home_dir("ai/mldata/court_detection/train_tfrecord1/")
    output_name = "train"

    print('Dataset directory:', dataset_dir)
    print('Output directory:', output_dir)
    random.seed(int(time.time()))

    multithread_create_tf_record(dataset_dir, output_dir, fidx=0, label_text_to_id=label_text_2_id)
Example #13
        'solid', 'catch-basin', 'utility-pole', 'pole', 'street-light', 'direction-back', 'direction-front',
        'ambiguous', 'other', 'text', 'diagonal', 'left', 'right', 'water-valve', 'general-single', 'temporary-front',
        'wheeled-slow', 'parking-meter', 'split-left-or-straight', 'split-right-or-straight', 'zigzag',
        'give-way-row', 'ground-animal', 'phone-booth', 'give-way-single', 'garage', 'temporary-back', 'caravan', 'other-barrier',
        'chevron', 'pothole', 'sand'
    ]
    label_map = {
        'individual':'person',
        'cyclists':'person',
        'other-rider':'person'
    }
    data = MapillaryVistasData(label_text2id=name_to_id, shuffle=False, ignored_labels=ignored_labels,label_map=label_map)
    # data.read_data("/data/mldata/qualitycontrol/rdatasv5_splited/rdatasv5")
    # data.read_data("/home/vghost/ai/mldata2/qualitycontrol/rdatav10_preproc")
    # data.read_data("/home/vghost/ai/mldata2/qualitycontrol/rdatasv10_neg_preproc")
    data.read_data(wmlu.home_dir("ai/mldata/mapillary_vistas/mapillary-vistas-dataset_public_v2.0"))


    def filter(x):
        return x in ['general-single', 'parking', 'temporary', 'general-horizontal']
        # return x in ['terrain']
        # return x in ['car']


    # data.read_data("/home/vghost/ai/mldata2/qualitycontrol/x")
    for x in data.get_items():
        full_path, img_info, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = x
        img = wmli.imread(full_path)


        def text_fn(classes, scores):
Example #14
from object_detection2.modeling.backbone import *
from object_detection2.modeling.backbone.dla import build_any_dla_backbone
from object_detection2.config.config import get_cfg
import tensorflow as tf
from object_detection2.modeling.backbone.mobilenets import *
import wml_utils as wmlu
import wmodule

global_cfg = get_cfg()
#global_cfg.MODEL.MOBILENETS.MINOR_VERSION = "SMALL"
global_cfg.MODEL.DLA.BACKBONE = "build_hrnet_backbone"
global_cfg.MODEL.RESNETS.DEPTH = 34
#global_cfg.MODEL.MOBILENETS.MINOR_VERSION = "LARGE"
net = tf.placeholder(tf.float32, [2, 512, 512, 3])
x = {'image': net}
parent = wmodule.WRootModule()
mn = build_any_dla_backbone(global_cfg, parent=parent)
res = mn(x)

sess = tf.Session()
summary_writer = tf.summary.FileWriter(wmlu.home_dir("ai/tmp/tools_log"),
                                       sess.graph)
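The graph written by `tf.summary.FileWriter` can then be inspected with TensorBoard pointed at the same directory (assuming `wmlu.home_dir` resolves under $HOME):

# shell: tensorboard --logdir ~/ai/tmp/tools_log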
Example #15
        mapillary_name2color[label] = color

    # calculate label color mapping
    colormap = []
    for i in range(0, len(ID_TO_NAME)):
        name = ID_TO_NAME[i]
        m_name = NAME_TO_MAPILLARY_NAME[name]
        color = mapillary_name2color[m_name]
        colormap = colormap + color
    colormap += [0, 0, 0] * (256 - len(ID_TO_NAME))
    return colormap

if __name__ == "__main__":
    dataset = SemanticData(img_suffix=".jpg",label_suffix=".png",img_sub_dir="boe_labels",label_sub_dir="boe_labels")
    dataset.read_data("/home/wj/ai/mldata/boesemantic")
    save_dir = wmlu.home_dir("ai/tmp/boe_images2")
    wmlu.create_empty_dir(save_dir,remove_if_exists=False)
    color_map = fill_colormap_and_names("/home/wj/ai/mldata/mapillary_vistas/config_v2.0.json")
    def text_fn(l):
        if l in ID_TO_READABLE_NAME:
            return ID_TO_READABLE_NAME[l]
        else:
            return "NA"
    def color_fn(l):
        return color_map[l*3:l*3+3]

    legend_img = draw_legend(list(ID_TO_NAME.keys()),text_fn,img_size=(2448,300),color_fn=color_fn)
    for ifn,img,mask in dataset.get_items():
        base_name = wmlu.base_name(ifn)
        wmlu.safe_copy(ifn,save_dir)
        rgb_mask = convert_semantic_to_rgb(mask,color_map,True)
Example #16
import wml_utils as wmlu
import iotoolkit.coco_tf_decodev2 as cocodecode
import iotoolkit.coco_toolkit as cocot
import wsummary
import iotoolkit.transform as trans
from object_detection2.config.config import CfgNode as CN
from object_detection2.data.datasets.build import DATASETS_REGISTRY
from object_detection2.data.dataloader import *
from wml_utils import AvgTimeThis
from object_detection2.standard_names import *
from object_detection2.config.config import *
import os
import tensorflow as tf  # needed for tf.app.flags below

os.environ['CUDA_VISIBLE_DEVICES'] = ''

tf.app.flags.DEFINE_string('logdir', wmlu.home_dir("ai/tmp/tools_logdir/"),
                           "Logdir path")

FLAGS = tf.app.flags.FLAGS
slim = tf.contrib.slim
from object_detection2.data.buildin_dataprocess import DATAPROCESS_REGISTRY

aa = trans.RandomSelectSubTransform(
    [[
        trans.WRandomTranslate(prob=1, pixels=20),
        trans.WRandomTranslate(prob=0.5, pixels=20)
    ],
     [
         trans.WRandomTranslate(prob=1, pixels=20, translate_horizontal=False),
         trans.WRandomTranslate(prob=0.5,
                                pixels=20,
Example #17
    if writer is not None:
        writer.close()

    tf.logging.info('Finished writing')


def get_save_path(save_dir, name, fidx):
    return os.path.join(save_dir, f"{name}_{fidx}.tfrecord")


def main(_):
    data_dirs = [
        "/home/wj/ai/mldata/MOT/MOT17/train",
        "/home/wj/ai/mldata/MOT/MOT17/test",
        "/home/wj/ai/mldata/MOT/MOT15/train",
        "/home/wj/ai/mldata/MOT/MOT15/test1",
        "/home/wj/ai/mldata/MOT/MOT20/train",
        "/home/wj/ai/mldata/MOT/MOT20/test",
    ]

    train_output_path = FLAGS.output_dir
    if not tf.gfile.IsDirectory(train_output_path):
        tf.gfile.MakeDirs(train_output_path)

    _create_tf_record(data_dirs, train_output_path, name='mot_train')


if __name__ == '__main__':
    FLAGS.output_dir = wmlu.home_dir("ai/mldata/MOT/tfdata_mot_small_trainv2")
    tf.app.run()
Example #18
    assert FLAGS.val_annotations_file, '`val_annotations_file` missing.'

    if not tf.gfile.IsDirectory(FLAGS.output_dir):
        tf.gfile.MakeDirs(FLAGS.output_dir)
    train_output_path = os.path.join(FLAGS.output_dir, 'train_coco0.tfrecord')
    val_output_path = os.path.join(FLAGS.output_dir, 'train_coco1.tfrecord')

    _create_tf_record_from_coco_annotations(FLAGS.train_annotations_file,
                                            FLAGS.train_image_dir,
                                            train_output_path,
                                            FLAGS.include_masks,
                                            is_train_data=True)

    _create_tf_record_from_coco_annotations(FLAGS.val_annotations_file,
                                            FLAGS.val_image_dir,
                                            val_output_path,
                                            FLAGS.include_masks,
                                            is_train_data=True)


if __name__ == '__main__':
    SCRATCH_DIR = wmlu.home_dir("ai/mldata/coco")
    FLAGS.train_image_dir = os.path.join(SCRATCH_DIR, "train2014")
    FLAGS.val_image_dir = os.path.join(SCRATCH_DIR, "val2014")
    FLAGS.train_annotations_file = os.path.join(
        SCRATCH_DIR, "annotations/instances_train2014.json")
    FLAGS.val_annotations_file = os.path.join(
        SCRATCH_DIR, "annotations/instances_val2014.json")
    FLAGS.output_dir = os.path.join(SCRATCH_DIR, "voc_tfdata1")
    tf.app.run()