def main():
    parser = argparse.ArgumentParser(
        description='Merge per-folder sloth annotation files into one final_config.json.')
    parser.add_argument('--root_image_folder', required=True)
    parser.add_argument(
        '-c',
        '--config',
        required=False,
        default="./config/vision_server.config.json",
        help="Path to the configuration file with the settings for Object Detection")
    args = parser.parse_args()

    if not os.path.isdir(args.root_image_folder):
        print("root_image_folder does not exist...")
        exit()

    config = tools.load_config(args.config)

    label_map = label_map_util.load_labelmap(config["label_map"])
    map_categories = label_map_util.convert_label_map_to_categories(
        label_map,
        max_num_classes=config["num_classes"],
        use_display_name=True)

    categories = [category["name"] for category in map_categories]

    final_config = {}
    annotations_total = 0
    for file in glob.glob(os.path.join(args.root_image_folder, "*/*.json")):
        config = tools.read_json_file(file)
        annotations_count = 0

        for image in config:
            if image["filename"] not in final_config and len(
                    image["annotations"]) > 0:
                annotations = []
                for annotation in image["annotations"]:
                    if annotation["class"] in categories:
                        annotations.append(annotation)

                if len(annotations) > 0:
                    image["annotations"] = annotations
                    final_config[image["filename"]] = image
                    annotations_count += len(annotations)

        print("{} length: {} annotations: {}".format(file, len(config),
                                                     annotations_count))
        annotations_total += annotations_count

    final_json_content = []
    for image in final_config.values():
        final_json_content.append(image)

    output_file = os.path.join(args.root_image_folder, "final_config.json")
    print("final config file: {} images: {} annotations: {}".format(
        output_file, len(final_json_content), annotations_total))
    tools.write_json_file(output_file, final_json_content)
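
For reference, each image entry these scripts read and write follows sloth's annotation format. A rough, hypothetical example (field names inferred from how the scripts access them; the geometry fields and all values are illustrative, not taken from the original project):

# Hypothetical sloth-style entry as it might appear in final_config.json:
example_entry = {
    "filename": "camera_01/frame_0001.jpg",
    "annotations": [
        {"class": "person", "x": 12.0, "y": 34.0, "width": 100.0, "height": 180.0}
    ]
}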
Example 2
def main():
    parser = argparse.ArgumentParser(
        description='Split a final sloth config into chunks and copy the images of each chunk.')
    parser.add_argument('--final_config_file', required=True)
    parser.add_argument('--final_folder', required=True)
    parser.add_argument('--src_folder', required=True)
    args = parser.parse_args()

    if not os.path.isfile(args.final_config_file):
        print("final_config_file does not exist...")
        exit()

    if not os.path.isdir(args.src_folder):
        print("src_folder does not exist...")
        exit()

    if not os.path.isdir(args.final_folder):
        # create the output folder if it does not exist yet
        os.makedirs(args.final_folder)

    final_config = []
    seen_filenames = set()
    config = tools.read_json_file(args.final_config_file)

    for image in config:
        # deduplicate by filename and skip images without annotations
        if image["filename"] not in seen_filenames and len(
                image["annotations"]) > 0:
            seen_filenames.add(image["filename"])
            final_config.append(image)

    folder_id = 1
    image_set = []

    print("split {} images".format(len(final_config)))

    for image in final_config:
        image_set.append(image)

        if len(image_set) == 251:
            file = write_images(image_set, args, folder_id)
            print("file: {} created".format(file))
            folder_id += 1
            image_set = []

    if len(image_set) > 0:
        file = write_images(image_set, args, folder_id)
        print("last file: {} created".format(file))
Example 3
def main():
    parser = argparse.ArgumentParser(
        description='Drop images without annotations from per-folder sloth annotation files.')
    parser.add_argument('--root_image_folder', required=True)
    args = parser.parse_args()

    if not os.path.isdir(args.root_image_folder):
        print("root_image_foldere does not exist...")
        exit()

    for file in glob.glob(os.path.join(args.root_image_folder, "*/*.json")):
        sloth_config = []
        seen_filenames = set()
        config = tools.read_json_file(file)

        for image in config:
            # deduplicate by filename and skip images without annotations
            if image["filename"] not in seen_filenames and len(
                    image["annotations"]) > 0:
                seen_filenames.add(image["filename"])
                sloth_config.append(image)
        print("file: {}".format(file))
        tools.write_json_file(file, sloth_config)
Example 4
def main():
    args = get_args()
    validate_images = []

    for extension in extensions:
        for file in glob.glob(os.path.join(args.image_dir, '*.' + extension)):
            validate_images.append(file)

    config = tools.load_config(args.config)

    validation_config = tools.read_json_file(args.validate_json)

    config_images = {}
    for image in validation_config:
        config_images[image["filename"]] = image["annotations"]

    if len(config_images) == 0:
        print("no images found in config...")
        exit()

    if args.graph is not None:
        config["path_to_ckpt"] = args.graph

    od = ObjectDetection(config, args.image_width, args.image_height,
                         args.gpu_usage)
    od.start_up()

    total_classes_in_config = 0
    predicted_valid = 0
    predicted_false = 0
    prediction_time = 0
    validated_images = 0
    empty_result = 0

    validate_images.sort()
    for image in validate_images:
        image_file = os.path.basename(image)

        if image_file not in config_images.keys():
            #print("Skip image {} cause not in config...".format(image_file))
            continue
        else:
            validated_images += 1

        imgcv = cv2.imread(image)
        start = time.time()
        result = od.detect_objects(imgcv)
        end = time.time()

        total_classes_in_config += len(config_images[image_file])

        if len(result["recognizedObjects"]) > 0:
            objects = result["recognizedObjects"]
            result_image = os.path.join(args.output_dir,
                                        "result_" + image_file)
            prediction_time += (end - start)

            imgcv = v.draw_boxes_and_labels(imgcv, objects)
            cv2.imwrite(result_image, imgcv)

            for obj in objects:
                if result_in_annotations(config_images[image_file],
                                         obj["class_name"]):
                    predicted_valid += 1
                    # print("valid: {}".format(image_file))
                else:
                    predicted_false += 1
                    print("{} predicted not tagged class: {}".format(
                        image_file, obj["class_name"]))
        else:
            empty_result += 1

    print(
        "validation classes: {} valid: {} false: {} empty results: {} total: {:.2f}% avg_time {:.2f}s".
        format(total_classes_in_config, predicted_valid, predicted_false,
               empty_result, predicted_valid / total_classes_in_config * 100,
               prediction_time / validated_images))

    od.shutdown()
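
The result_in_annotations helper is also not shown in this listing. A plausible sketch, assuming it simply checks whether the predicted class name matches any ground-truth annotation of the image:

def result_in_annotations(annotations, class_name):
    # annotations: list of sloth annotations for one image (assumed to carry a "class" field);
    # class_name: the class predicted by the detector.
    for annotation in annotations:
        if annotation["class"] == class_name:
            return True
    return False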
Example 5
def main():
    parser = argparse.ArgumentParser(
        description='Convert a sloth annotation file into a TFRecord file.')
    parser.add_argument('--sloth-cfg', required=True)
    parser.add_argument('--record-file', required=True)
    parser.add_argument('--image-dir', required=True)
    parser.add_argument('--labels', required=True)
    args = parser.parse_args()

    if not os.path.isfile(args.sloth_cfg):
        print("Config-File does not exist...")
        exit()
    if not os.path.isdir(args.image_dir):
        print("Image dir does not exist...")
        exit()
    if not os.path.isfile(args.labels):
        print("Labels-File does not exist...")
        exit()

    writer = tf.python_io.TFRecordWriter(args.record_file)

    label_map_dict = label_map_util.get_label_map_dict(args.labels)
    config = tools.read_json_file(args.sloth_cfg)

    images_config = []
    images_count = 0
    annotation_count = 0
    start = time.time()
    for image in config:
        image_file = os.path.join(args.image_dir, image["filename"])
        if not os.path.isfile(image_file):
            print("image {} not found on directory".format(image["filename"]))
            continue
        extension = image_file.split(".")[-1]
        if extension not in extensions:
            print("image {} has wrong extension...".format(extension))
            continue
        if len(image["annotations"]) == 0:
            print("image {} has no annotations".format(image["filename"]))
            continue

        images_config.append(image["filename"])
        images_count += 1

        image_height, image_width, channels = scipy.ndimage.imread(
            image_file).shape

        encoded_jpg = tf.gfile.GFile(image_file, 'rb').read()
        key = hashlib.sha256(encoded_jpg).hexdigest()

        xmin = []
        ymin = []
        xmax = []
        ymax = []
        classes = []
        classes_text = []

        for annotation in image["annotations"]:
            annotation_count += 1
            a_xmin, a_xmax, a_ymin, a_ymax = fix_outside_roi_points(
                annotation, image_width, image_height)

            xmin.append(float(a_xmin) / image_width)
            ymin.append(float(a_ymin) / image_height)
            xmax.append(float(a_xmax) / image_width)
            ymax.append(float(a_ymax) / image_height)

            classes_text.append(annotation["class"].encode('utf8'))
            classes.append(label_map_dict[annotation["class"]])

        example = tf.train.Example(features=tf.train.Features(feature={
            'image/height': dataset_util.int64_feature(image_height),
            'image/width': dataset_util.int64_feature(image_width),
            'image/filename': dataset_util.bytes_feature(
                image["filename"].encode('utf8')),
            'image/source_id': dataset_util.bytes_feature(
                image['filename'].encode('utf8')),
            'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
            'image/encoded': dataset_util.bytes_feature(encoded_jpg),
            'image/format': dataset_util.bytes_feature(
                image["filename"].split(".")[-1].encode('utf8')),
            'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
            'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
            'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
            'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
            'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
            'image/object/class/label': dataset_util.int64_list_feature(classes),
        }))

        writer.write(example.SerializeToString())

    writer.close()

    end = time.time()
    print("conversion done in {} seconds for {} images and {} annotations".
          format(end - start, images_count, annotation_count))
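
The fix_outside_roi_points helper is not included here either. A sketch of a clamping helper, assuming sloth-style rectangles with x, y, width and height fields, that keeps box corners inside the image bounds:

def fix_outside_roi_points(annotation, image_width, image_height):
    # Assumed annotation fields: "x", "y", "width", "height" (sloth rectangles).
    xmin = max(0.0, float(annotation["x"]))
    ymin = max(0.0, float(annotation["y"]))
    xmax = min(float(image_width), float(annotation["x"]) + float(annotation["width"]))
    ymax = min(float(image_height), float(annotation["y"]) + float(annotation["height"]))
    return xmin, xmax, ymin, ymax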
Example 6
def main():
    args = get_args()
    validate_images = []

    for extension in extensions:
        for file in glob.glob(os.path.join(args.image_dir, '*.' + extension)):
            validate_images.append(file)

    # remove duplicate images from image_dir.
    # maybe because of same upper and lowercase extension
    validate_images = list(set(validate_images))
    validate_images.sort()

    validation_config = tools.read_json_file(args.validate_json)
    config = dict()
    if args.config is not None:
        config = tools.load_config(args.config)
    else:
        config["path_to_ckpt"] = args.graph
        config["label_map"] = args.mapping_file
        config["num_classes"] = int(args.num_classes)
        config["model_name"] = args.model_name

    config_images = {}
    for image in validation_config:
        config_images[image["filename"]] = image["annotations"]

    if len(config_images) == 0:
        print("no images found in config...")
        exit()

    gt_dict = gt_tools.prepare_ground_truth(validation_config,
                                            im_height=args.image_height,
                                            im_width=args.image_width)

    od = ObjectDetection(config,
                         gt_dict,
                         args.image_width,
                         args.image_height,
                         image_annotation=args.result_json is not None,
                         image_visualization=args.bounding_box,
                         gpu_usage=args.gpu_usage)
    od.start_up()

    images = list()
    for image in validate_images:
        images.append(os.path.basename(image))

    numpy_array = read_images(validate_images)

    result = od.detect_objects(numpy_array, images)

    print('METRIC: {} '.format(result['result_metric']))

    if args.result_json is not None:
        save_result_annotation(result['result_annotation'], images,
                               args.image_dir, args.result_json)

    if args.output_dir is not None:
        save_result_images(result['result_images'], args.output_dir, images)

    od.shutdown()
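
The read_images helper is another one the listing leaves out. A rough sketch, assuming it loads every image with OpenCV and returns them as one numpy array (the real helper may resize or batch the images differently):

import cv2
import numpy as np


def read_images(image_files):
    # Load each image with OpenCV; assumes all images share the same shape
    # so that numpy can stack them into a single array.
    images = [cv2.imread(image_file) for image_file in image_files]
    return np.asarray(images)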
Example 7
from utils import prepare_ground_truth as gt_tools
from utils import tools
# TODO proper TestCase

inputfile = 'c:\\tmp\\eval_real_new\\final_config.json'

json_data = tools.read_json_file(inputfile)

gt_dict = gt_tools.prepare_ground_truth(json_data)

print()

print(str(gt_dict))
Example 8
import argparse
import os
from collections import OrderedDict


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', required=True)

    args = parser.parse_args()

    if not os.path.isfile(args.config):
        print("[ERROR] config file does not exist... {}".format(args.config))
        exit(1)

    return args


if __name__ == '__main__':
    args = get_args()

    config = read_json_file(args.config)

    images = 0
    tags = OrderedDict()
    for file in config:
        images += 1

        for tag in file["annotations"]:
            class_name = tag["class"]
            if tag["class"] in tags:
                tags[class_name] += 1
            else:
                tags[class_name] = 1

    ranking = sorted(tags.items(), key=lambda x: x[1], reverse=True)
    print("images: {}".format(images))